"""SQL Lexer"""

from sqlparse import tokens
from sqlparse.keywords import SQL_REGEX
from sqlparse.compat import bytes_type, text_type, file_types
from sqlparse.utils import consume


class Lexer(object):
    """Lexer
    Empty class. Leaving for backwards-compatibility
    """

    @staticmethod
    def get_tokens(text, encoding=None):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`.

        Also preprocess the text: file-like objects are read first, and
        byte strings are decoded using `encoding` if given, otherwise
        UTF-8 with a unicode-escape fallback.

        Split ``text`` into (tokentype, text) pairs.
        zutf-8zunicode-escapez+Expected text or file-like object, got {!r}   N)
isinstancer   readr   r   decodeUnicodeDecodeError	TypeErrorformattype	enumerater   r   Z
_TokenTypegroupcallabler   endError)textencodingiterableposcharZrexmatchactionm r   M/var/www/html/sandeepIITI/myenv/lib/python3.6/site-packages/sqlparse/lexer.py
get_tokens   s4    



zLexer.get_tokens)N)__name__
__module____qualname____doc__staticmethodr   r   r   r   r   r      s   r   Nc             C   s   t  j| |S )zTokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    )r   r   )Zsqlr   r   r   r   tokenizeK   s    r%   )N)r#   Zsqlparser   Zsqlparse.keywordsr   Zsqlparse.compatr   r   r   Zsqlparse.utilsr   objectr   r%   r   r   r   r   <module>   s   6