name : lexer.cpython-39.pyc

CPython 3.9 bytecode compiled from /usr/lib/python3.9/site-packages/jinja2/lexer.py, the template lexer shipped with Jinja2 2.x (the module still imports the `_compat` shims that were removed in Jinja2 3.0). Marshalled bytecode is not readable as text, so the listing below reconstructs the module from the docstrings, identifiers, regex sources and string constants preserved in the dump; passages marked as sketches are illustrative additions, not recovered code.
"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
is used to do some preprocessing. It filters out invalid operators like
the bitshift operators we don't allow in templates. It separates
template code and python code in expressions.
"""
import re
from ast import literal_eval
from collections import deque
from operator import itemgetter

from ._compat import implements_iterator
from ._compat import intern
from ._compat import iteritems
from ._compat import text_type
from .exceptions import TemplateSyntaxError
from .utils import LRUCache

# cache for the lexers so multiple environments can share one lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r"\s+", re.U)
newline_re = re.compile(r"(\r\n|\r|\n)")
string_re = re.compile(
    r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
)
integer_re = re.compile(r"(\d+_)*\d+")
float_re = re.compile(
    r"""
    (?<!\.)  # doesn't start with a .
    (\d+_)*\d+  # digits, possibly _ separated
    (
        (\.(\d+_)*\d+)?  # optional fractional part
        e[+\-]?(\d+_)*\d+  # exponent part
    |
        \.(\d+_)*\d+  # required fractional part
    )
    """,
    re.IGNORECASE | re.VERBOSE,
)

try:
    # if this expression compiles, the interpreter supports unicode
    # identifiers (Python 3); otherwise fall back to ASCII names
    compile("föö", "<unknown>", "eval")
except SyntaxError:
    name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
    check_ident = False
else:
    from ._identifier import pattern as name_re

    check_ident = True
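
A quick look at what the number patterns above accept (an illustrative sketch, not recovered code; it assumes the module is importable as jinja2.lexer, which is where this file is installed):

    from jinja2.lexer import float_re, integer_re

    assert integer_re.fullmatch("1_000")     # "_"-separated digit groups
    assert float_re.match("42.5")            # required fractional part
    assert float_re.match("2e10")            # exponent without a fraction
    assert float_re.match("42") is None      # a plain integer is not a float
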
�N)�literal_eval)�deque)�
itemgetter�)�implements_iterator)�intern)�	iteritems)�	text_type)�TemplateSyntaxError)�LRUCache�2z\s+z(\r\n|\r|\n)z7('([^'\\]*(?:\\.[^'\\]*)*)'|"([^"\\]*(?:\\.[^"\\]*)*)")z
(\d+_)*\d+z�
    (?<!\.)  # doesn't start with a .
    (\d+_)*\d+  # digits, possibly _ separated
    (
        (\.(\d+_)*\d+)?  # optional fractional part
        e[+\-]?(\d+_)*\d+  # exponent part
    |
        \.(\d+_)*\d+  # required fractional part
    )
    ufööz	<unknown>�evalz[a-zA-Z_][a-zA-Z0-9_]*F)�patternT�addZassign�colonZcommaZdiv�dot�eq�floordiv�gtZgteqZlbraceZlbracketZlparen�ltZlteq�mod�mul�ne�pipe�pow�rbraceZrbracketZrparenZ	semicolon�sub�tildeZ
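
The operator pattern is built longest-first (note the sorted key above), so a two-character operator is never split into two one-character tokens. A small check (illustrative, not recovered code):

    from jinja2.lexer import operator_re, operators

    assert operators["**"] == "pow"
    assert operator_re.match("**=").group() == "**"   # longest match wins
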
def _describe_token_type(token_type):
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    return {
        TOKEN_COMMENT_BEGIN: "begin of comment",
        TOKEN_COMMENT_END: "end of comment",
        TOKEN_COMMENT: "comment",
        TOKEN_LINECOMMENT: "comment",
        TOKEN_BLOCK_BEGIN: "begin of statement block",
        TOKEN_BLOCK_END: "end of statement block",
        TOKEN_VARIABLE_BEGIN: "begin of print statement",
        TOKEN_VARIABLE_END: "end of print statement",
        TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
        TOKEN_LINESTATEMENT_END: "end of line statement",
        TOKEN_DATA: "template data / text",
        TOKEN_EOF: "end of template",
    }.get(token_type, token_type)


def describe_token(token):
    """Returns a description of the token."""
    if token.type == TOKEN_NAME:
        return token.value
    return _describe_token_type(token.type)


def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ":" in expr:
        type, value = expr.split(":", 1)
        if type == TOKEN_NAME:
            return value
    else:
        type = expr
    return _describe_token_type(type)


def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    return len(newline_re.findall(value))


def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    rules = [
        (len(environment.comment_start_string), TOKEN_COMMENT_BEGIN,
         e(environment.comment_start_string)),
        (len(environment.block_start_string), TOKEN_BLOCK_BEGIN,
         e(environment.block_start_string)),
        (len(environment.variable_start_string), TOKEN_VARIABLE_BEGIN,
         e(environment.variable_start_string)),
    ]
    if environment.line_statement_prefix is not None:
        rules.append((len(environment.line_statement_prefix),
                      TOKEN_LINESTATEMENT_BEGIN,
                      r"^[ \t\v]*" + e(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        rules.append((len(environment.line_comment_prefix),
                      TOKEN_LINECOMMENT_BEGIN,
                      r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix)))
    # longest delimiters first so a short prefix never shadows a longer one
    return [x[1:] for x in sorted(rules, reverse=True)]


class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        raise self.error_class(self.message, lineno, filename)


class Token(tuple):
    """Token class."""

    __slots__ = ()
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == "name":
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        if self.type == expr:
            return True
        elif ":" in expr:
            return expr.split(":", 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
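
How tokens behave in practice (an illustrative sketch against the public jinja2.lexer names, not recovered code):

    from jinja2.lexer import Token, describe_token

    tok = Token(1, "name", "endfor")
    assert tok.test("name")                   # bare type match
    assert tok.test("name:endfor")            # "type:value" match
    assert not tok.test("name:endif")
    assert describe_token(tok) == "endfor"    # names describe as their value
    assert str(Token(1, "pipe", "|")) == "|"  # operators print as themselves
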
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token


@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, "")
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        return bool(self._pushed) or self.current.type is not TOKEN_EOF

    __nonzero__ = __bool__  # py2 compatibility

    @property
    def eos(self):
        """Are we at the end of the stream?"""
        return not self

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for _ in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one.

        Use the built-in :func:`next` instead of calling this directly.
        """
        rv = self.current
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        self.current = Token(self.current.lineno, TOKEN_EOF, "")
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError(
                    "unexpected end of template, expected %r." % expr,
                    self.current.lineno, self.name, self.filename,
                )
            raise TemplateSyntaxError(
                "expected token %r, got %r" % (expr, describe_token(self.current)),
                self.current.lineno, self.name, self.filename,
            )
        try:
            return self.current
        finally:
            next(self)
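
Driving a stream by hand (illustrative sketch, not recovered code):

    from jinja2.lexer import Token, TokenStream

    toks = [Token(1, "name", "foo"), Token(1, "pipe", "|"),
            Token(1, "name", "upper")]
    stream = TokenStream(iter(toks), "<demo>", None)

    assert stream.current.value == "foo"
    assert stream.look().type == "pipe"    # peek without consuming
    stream.expect("name")                  # consume "foo" or raise
    assert stream.skip_if("pipe")          # conditionally consume "|"
    assert next(stream).value == "upper"
    assert stream.eos                      # exhausted: current is now eof
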
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.lstrip_blocks,
        environment.newline_sequence,
        environment.keep_trailing_newline,
    )
    lexer = _lexer_cache.get(key)
    if lexer is None:
        lexer = Lexer(environment)
        _lexer_cache[key] = lexer
    return lexer


class OptionalLStrip(tuple):
    """A special tuple for marking a point in the state that can have
    lstrip applied.
    """

    __slots__ = ()

    # Even though it looks like a no-op, creating instances fails
    # without this.
    def __new__(cls, members, **kwargs):
        return super(OptionalLStrip, cls).__new__(cls, members, **kwargs)
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # The rule table does not decompile cleanly; in outline (recovered
        # from the embedded patterns and state names) this constructor:
        #  * compiles the whitespace/float/integer/name/string/operator
        #    tag rules,
        #  * builds one regex-driven state per construct: "root" plus the
        #    comment, block, variable, raw, line-statement and line-comment
        #    states derived from compile_rules() above,
        #  * honours trim_blocks via an optional trailing "\n?" suffix and
        #    lstrip_blocks via a "[^ \t]" lstrip-unless pattern,
        #  * stores environment.newline_sequence and keep_trailing_newline,
        #  * registers Failure("Missing end of comment tag") and
        #    Failure("Missing end of raw directive") for unterminated tags.
        ...

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream."""
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == TOKEN_LINESTATEMENT_BEGIN:
                token = TOKEN_BLOCK_BEGIN
            elif token == TOKEN_LINESTATEMENT_END:
                token = TOKEN_BLOCK_END
            # the parser is not interested in the raw markers
            elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
                continue
            elif token == TOKEN_DATA:
                value = self._normalize_newlines(value)
            elif token == "keyword":
                token = value
            elif token == TOKEN_NAME:
                value = str(value)
                if check_ident and not value.isidentifier():
                    raise TemplateSyntaxError(
                        "Invalid character in identifier", lineno, name, filename
                    )
            elif token == TOKEN_STRING:
                # try to unescape the string
                try:
                    value = (
                        self._normalize_newlines(value[1:-1])
                        .encode("ascii", "backslashreplace")
                        .decode("unicode-escape")
                    )
                except Exception as e:
                    msg = str(e).split(":")[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == TOKEN_INTEGER:
                value = int(value.replace("_", ""))
            elif token == TOKEN_FLOAT:
                # remove all "_" first to support more Python versions
                value = literal_eval(value.replace("_", ""))
            elif token == TOKEN_OPERATOR:
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        # The body, a regex state machine of roughly 150 lines, is only
        # summarized here. Recovered landmarks: it splits the source on
        # "\r\n" / "\r" / "\n" and rejoins it (honouring
        # keep_trailing_newline), walks a state stack starting at "root"
        # (or "variable" / "block" when a state argument is given, asserting
        # "invalid state" otherwise), follows "#pop" and "#bygroup" state
        # directives, keeps a balancing stack for (), [] and {} and raises
        # TemplateSyntaxError for "unexpected '%s'",
        # "unexpected '%s', expected '%s'" and "unexpected char %r at %d",
        # applies whitespace control and lstrip_blocks via OptionalLStrip
        # groups, and raises RuntimeError when a rule "wanted to resolve the
        # token dynamically but no group matched" or "yielded empty string
        # without stack change".
        ...
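
End to end, the lexer is obtained through the environment cache and produces a TokenStream (illustrative sketch; assumes Jinja2 2.x is installed):

    from jinja2 import Environment
    from jinja2.lexer import get_lexer

    env = Environment()
    lexer = get_lexer(env)
    assert lexer is get_lexer(Environment())  # same config, cached Lexer

    for token in lexer.tokenize(u"Hello {{ name }}!"):
        print(token.lineno, token.type, token.value)
    # prints, token by token, roughly:
    # 1 data 'Hello '
    # 1 variable_begin '{{'
    # 1 name 'name'
    # 1 variable_end '}}'
    # 1 data '!'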
