from django.conf import settings
from django.core.files.storage import default_storage
from bson import ObjectId
import urllib.parse
import os

# Module-level logger for this file, writing to its own log file under
# settings.LOGGING_FILE_ROOT (independent of the root logger's handlers).
import logging
logger = logging.getLogger('panel.utils')
# Capture everything from DEBUG up; the file handler below uses the same level.
logger.setLevel(logging.DEBUG)

# Destination log file for this module.
logging_file = os.path.join(settings.LOGGING_FILE_ROOT, 'panel_utils.log')

# File handler dedicated to this logger.
file_handler = logging.FileHandler(logging_file)
file_handler.setLevel(logging.DEBUG)

# Format: "2024-01-01 12:00:00,000 - INFO - message"
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)

# Guard against attaching a duplicate handler when this module is re-imported
# (e.g. by Django's autoreloader), which would produce repeated log lines.
if not logger.handlers:
    logger.addHandler(file_handler)

def _location_ids(raw_value, model, name_field, id_field):
    """Resolve comma-separated location names to their stored ids.

    Performs a case-insensitive prefix match on ``name_field`` for each name;
    names that match no document are silently skipped.
    """
    ids = []
    for name in raw_value.split(','):
        doc = model.objects.mongo_find_one(
            {name_field: {'$regex': f"^{name.strip()}", '$options': 'i'}})
        if doc:
            ids.append(doc[id_field])
    return ids


def get_mongo_match(query_params, exclude_fields, search_fields=None):
    """Translate request query parameters into a MongoDB ``$match`` filter.

    Supports Django-style ``field__lookup`` keys (e.g. ``price__gte``) mapped
    to MongoDB operators, plus special handling for:

    - ``cidade`` / ``estado`` / ``bairro``: name -> id lookups via v1 models;
    - ``search``: case-insensitive ``$or`` regex over ``search_fields``;
    - ``geo``: "lat,lng,radius_m" converted to a lat/lng bounding box;
    - ``query_params__*``: regex filters on nested ``query_params.<key>``.

    Args:
        query_params: Mapping of query-string keys to values; values may be
            lists, as produced by ``dict(request.query_params)``.
        exclude_fields: Iterable of field names that must never be filtered on.
        search_fields: Fields targeted by the ``search`` parameter. Defaults to
            an empty list (fix: was a mutable default argument).

    Returns:
        dict: A filter document suitable for a ``$match`` pipeline stage.
    """
    import math

    search_fields = search_fields or []
    match = {}

    def _coerce_token(raw):
        # Coerce one '$in' token: ObjectId string -> ObjectId, numeric -> float,
        # anything else stays a plain (stripped) string.
        raw = raw.strip()
        if ObjectId.is_valid(raw):
            return ObjectId(raw)
        if raw.replace('.', '', 1).isdigit():
            return float(raw)
        return raw

    # Map lookup suffixes to MongoDB filter fragments.
    filter_types = {
        # Plain equality; *_id fields (except listing_id) are stored as ints.
        'exact': lambda field, value: {field: int(value) if field.endswith('_id') and field != 'listing_id' else value},
        # Equality on an ObjectId-valued field. Fix: this key is assigned
        # below when the value looks like an ObjectId, but it was missing from
        # this dict, so such filters fell back to matching the raw string.
        'exact_objectid': lambda field, value: {field: ObjectId(value) if ObjectId.is_valid(value) else value},
        'precise': lambda field, value: {field: float(value)},
        'icontains': lambda field, value: {field: {'$regex': value, '$options': 'i'}},
        # Membership: each comma-separated token is coerced individually.
        'in': lambda field, value: {field: {'$in': [_coerce_token(v) for v in value.split(',')]}},
        'range': lambda field, value: {field: {'$gte': float(value.split(',')[0]), '$lte': float(value.split(',')[1])}},
        'gte': lambda field, value: {field: {'$gte': float(value)}},
        'lt': lambda field, value: {field: {'$lt': float(value)}},
        'lte': lambda field, value: {field: {'$lte': float(value)}},
        'gt': lambda field, value: {field: {'$gt': float(value)}},
        'regex': lambda field, value: {field: {'$regex': value}},
        'iregex': lambda field, value: {field: {'$regex': value, '$options': 'i'}},
        'iexact': lambda field, value: {field: {'$regex': f"^{value}$", '$options': 'i'}},
        'startswith': lambda field, value: {field: {'$regex': f"^{value}"}},
        'istartswith': lambda field, value: {field: {'$regex': f"^{value}", '$options': 'i'}},
        'endswith': lambda field, value: {field: {'$regex': f"{value}$"}},
        'iendswith': lambda field, value: {field: {'$regex': f"{value}$", '$options': 'i'}},
        'exists': lambda field, value: {field: {'$exists': value.lower() == 'true'}}
    }

    # Keys consumed by pagination/serialization or handled specially below.
    special_keys = ['plantas_quartos', 'ordering', 'page_size', 'page',
                    'reference', 'fields', 'search', 'medias_limit',
                    'save_params', 'geo', 'cidade', 'estado', 'bairro']

    for key, values in query_params.items():
        if not values or key in special_keys:
            continue
        value = values[0] if isinstance(values, list) else values
        # Decode percent-encoding so e.g. spaces match the stored data.
        value = urllib.parse.unquote(value)

        parts = key.split('__')
        if len(parts) > 1 and parts[-1] in filter_types:
            field = '__'.join(parts[:-1])
            filter_type = parts[-1]
        else:
            field, filter_type = key, 'exact'
        if filter_type == 'exact' and ObjectId.is_valid(value):
            filter_type = 'exact_objectid'
            # The public 'id' parameter maps to Mongo's '_id'.
            if field == 'id':
                field = '_id'

        if exclude_fields and field in exclude_fields:
            continue
        if key.startswith('query_params__'):
            continue  # handled separately at the bottom
        method = filter_types.get(filter_type, lambda f, v: {f: v})
        field = field.replace('__', '.')  # nested-document dot notation
        match.update(method(field, value))

    # Location name filters: translate human-readable names into stored ids.
    if 'cidade' in query_params:
        from v1.models import City
        match.update({'cidade_id': {'$in': _location_ids(
            query_params['cidade'][0], City, 'city_name', 'city_id')}})

    if 'estado' in query_params:
        from v1.models import State
        match.update({'estado_id': {'$in': _location_ids(
            query_params['estado'][0], State, 'state_name', 'state_id')}})

    if 'bairro' in query_params:
        from v1.models import Neighborhood
        match.update({'bairro_id': {'$in': _location_ids(
            query_params['bairro'][0], Neighborhood, 'neighborhood_name', 'neighborhood_id')}})

    # Free-text search across the caller-provided search_fields.
    if 'search' in query_params:
        search = urllib.parse.unquote(query_params.get('search')[0])
        match.update({'$or': [{field: {'$regex': f".*{search}.*", '$options': 'i'}}
                              for field in search_fields]})

    # Geo filter: "lat,lng,radius_in_meters" -> axis-aligned bounding box.
    if 'geo' in query_params:
        lat, lng, radius = map(float, query_params['geo'][0].split(','))
        # Fix: the old guard `lat and lng and radius` rejected valid
        # coordinates at exactly 0.0 (equator / prime meridian).
        if radius > 0:
            # ~111 km per degree of latitude; longitude shrinks with cos(lat).
            radius_in_degrees_lat = radius / 111000.0
            radius_in_degrees_lng = radius / (111000.0 * math.cos(math.radians(lat)))
            match.update({
                'latitude': {'$gte': round(lat - radius_in_degrees_lat, 6),
                             '$lte': round(lat + radius_in_degrees_lat, 6)},
                'longitude': {'$gte': round(lng - radius_in_degrees_lng, 6),
                              '$lte': round(lng + radius_in_degrees_lng, 6)},
            })

    # Nested query_params filters: 'query_params__x' or 'query_params__[x]'
    # become a case-insensitive regex on 'query_params.x'.
    for key in (k for k in query_params if k.startswith('query_params__')):
        value = query_params[key][0]
        if '[' in key:
            nested_key = key[key.find('[') + 1:key.find(']')]
        else:
            nested_key = key[key.find('__') + 2:]
        match.update({f"query_params.{nested_key}": {'$regex': f".*{value}.*", '$options': 'i'}})

    return match


def save_query_params(query_params, url, model, user=None):
    """Persist the request's query parameters for later analysis.

    Only acts when the request explicitly opts in with ``save_params=1``.
    When an authenticated user is given, one record per user/origin is kept in
    ``QueryParamsUser`` (updated in place if it already exists). A record is
    always appended to ``QueryParamsGeneral``.

    Args:
        query_params: Mutable mapping of query-string keys to values; values
            may be lists, as produced by ``dict(request.query_params)``.
            ``save_params`` is popped from it as a side effect.
        url: The request URL being recorded.
        model: Name of the originating model/endpoint (stored as ``origin``).
        user: Optional Django user; anonymous users are treated as absent.
    """
    if query_params and 'save_params' in query_params and query_params.get('save_params')[0] == '1':
        query_params.pop('save_params', None)

        # Flatten list values and sort keys so equivalent filters compare equal.
        flat = {k: v[0] if isinstance(v, list) else v for k, v in query_params.items()}
        query_params_sorted = dict(sorted(flat.items()))

        # Copies of the params stored as top-level fields for easy querying.
        additional_fields = dict(query_params_sorted)

        from django.utils import timezone
        created_at = timezone.now()

        if user and not user.is_anonymous:
            from panel.models import QueryParamsUser
            user_id = [user.id]
            saved_params = QueryParamsUser.objects.mongo_find({'user_id': user_id, 'origin': model})
            if saved_params and saved_params.count() > 0:
                # A record already exists for this user/origin: update it.
                saved_params_id = saved_params[0]['_id']
                try:
                    QueryParamsUser.objects.mongo_update_one(
                        # Fix: the filter previously used 'id', a field that
                        # does not exist in the documents (the value above was
                        # read from '_id'), so the update matched nothing.
                        {'_id': ObjectId(saved_params_id)},
                        {'$set': {'query_params': [query_params_sorted], 'url': url,
                                  'origin': model, 'updated_at': created_at}}
                    )
                except Exception as e:
                    logger.error(f"Error updating query params: {e}")
            else:
                try:
                    QueryParamsUser.objects.mongo_insert(
                        {'user_id': user_id, 'query_params': [query_params_sorted],
                         'url': url, 'origin': model, 'created_at': created_at}
                    )
                except Exception as e:
                    logger.error(f"Error saving query params: {e}")

        # Always record in the general collection, regardless of user.
        from panel.models import QueryParamsGeneral
        try:
            document = {
                'query_params': [query_params_sorted],
                'url': url,
                'origin': model,
                'created_at': created_at,
                **additional_fields
            }
            QueryParamsGeneral.objects.mongo_insert(document)
        except Exception as e:
            logger.error(f"Error saving query params: {e}")
    else:
        logger.info("No user or query params to save")



def build_aggregation_pipeline(request, exclude_fields, search_fields=None, user=None, model=None):
    """Build the main aggregation pipeline and a count-only companion pipeline.

    Returns a tuple ``(pipeline, pipeline_filter)``: the first applies the
    match, ordering, pagination and projection stages; the second applies the
    same match followed by a ``$count`` stage (no limit), so callers can get
    the total number of matching documents.
    """
    params = dict(request.query_params)
    match_stage = get_mongo_match(params, exclude_fields, search_fields)

    pipeline = [{'$match': match_stage}]

    # Page size falls back to the DRF default configured in settings.py.
    default_size = settings.REST_FRAMEWORK.get('PAGE_SIZE', 10)
    page_size = int(request.query_params.get('page_size', default_size))

    ordering = request.query_params.get('ordering', '-_id').strip()
    if ordering.startswith('?'):
        # '?' requests random results; $sample both orders and limits, so
        # skip/limit pagination is not applied in this branch.
        pipeline.append({'$sample': {'size': page_size}})
    else:
        direction = -1 if ordering.startswith('-') else 1
        pipeline.append({'$sort': {ordering.lstrip('-'): direction}})
        # Standard skip/limit pagination.
        page = int(request.query_params.get('page', 1))
        pipeline.append({'$skip': (page - 1) * page_size})
        pipeline.append({'$limit': page_size})

    # Projection: the caller-requested fields, or only _id by default.
    if 'fields' in request.query_params:
        requested = request.query_params.get('fields').split(',')
        pipeline.append({'$project': {name: 1 for name in requested}})
    else:
        pipeline.append({'$project': {'_id': 1}})

    # Same filter without limits, reduced to a document count.
    pipeline_filter = [{'$match': match_stage}, {'$count': 'total'}]

    return pipeline, pipeline_filter




from urllib.parse import urlencode, urlparse, urlunparse, parse_qs
from datetime import datetime

def build_absolute_uri(request, new_params):
    """Return the request's absolute URI with *new_params* merged into its query string.

    Existing parameters are preserved (repeated values included); keys present
    in *new_params* replace same-named keys from the original URI.
    """
    scheme, netloc, path, params, query, fragment = urlparse(request.build_absolute_uri())
    merged = parse_qs(query)
    merged.update(new_params)
    new_query = urlencode(merged, doseq=True)
    return urlunparse((scheme, netloc, path, params, new_query, fragment))

# from django.core.mail import EmailMessage
# from django.template.loader import render_to_string
# from django.utils.html import strip_tags
# from django.conf import settings

# def send_contact_email(recipient_email, context):
#     # Load the static template and render it with context data
#     html_message = render_to_string('email_template.html', context)
#     plain_message = strip_tags(html_message)  # For plain-text fallback

#     email = EmailMessage(
#         subject="Your Subject Here",
#         body=plain_message,
#         from_email=settings.DEFAULT_FROM_EMAIL,
#         to=[recipient_email]
#     )
#     email.content_subtype = 'html'  # Set content type to HTML

#     # Attach the uploaded header and footer images
#     email.attach_file(settings.EMAIL_HEADER_IMAGE_PATH)
#     email.attach_file(settings.EMAIL_FOOTER_IMAGE_PATH)

#     # Embed the images into the HTML content
#     email.attach_inline('header_image', open(settings.EMAIL_HEADER_IMAGE_PATH, 'rb').read(), 'image/jpeg')
#     email.attach_inline('footer_image', open(settings.EMAIL_FOOTER_IMAGE_PATH, 'rb').read(), 'image/jpeg')

#     # Send the email
#     email.send()



# # Upload file handler
# def handle_uploaded_file(file, folder_name, instance_id=None, multiple=False, foreign_id=None, type="image"):
#     try:
#         if type=="image":
#             # Check if the file is an image
#             if not file.content_type.startswith('image'):
#                 logger.error("File is not an image.")
#                 return None
#             folder_type = 'img'
#         else:
#             folder_type = 'doc'
        
#         # if image is webp convert to jpg
#         # if file.content_type == 'image/webp':
#         #     from PIL import Image
#         #     from io import BytesIO
#         #     im = Image.open(file)
#         #     im = im.convert('RGB')
#         #     output = BytesIO()
#         #     im.save(output, format='JPEG', quality=95)
#         #     output.seek(0)
#         #     output.name = f"{os.path.splitext(file.name)[0]}.jpg"
#         #     file_content = output.getvalue()
#         #     file = output
        
#         # Ensure the directory exists
#         if multiple==True and foreign_id:
#             base_dir = os.path.join(folder_name.lower(),folder_type,str(foreign_id))
#             #logger.info(f"Directory: {base_dir}")
#         elif instance_id:
#             base_dir = os.path.join(folder_name.lower(),folder_type,str(instance_id))
#             #logger.info(f"Directory: {base_dir}")
#         else:
#             base_dir = os.path.join(folder_name.lower(),folder_type)
#             #logger.info(f"Directory: {base_dir}")
        
#         directory = os.path.join(settings.MEDIA_ROOT, base_dir)
        
#         os.makedirs(directory, exist_ok=True)
        
#         #logger.info(f"Directory: {directory}")
        
#         # Change file name to avoid conflicts
#         file.name = f"{instance_id}{os.path.splitext(file.name)[1]}"
#         # logger.info(f"File Name: {file.name}")
#         # logger.info(f"File Size: {file.size}")
#         # logger.info(f"File Content Type: {file.content_type}")

#         # Set the save path
#         save_path = os.path.join(directory, file.name)
#         #logger.info(f"Saving to path: {save_path}")

#         # Write the file content directly in chunks
#         with default_storage.open(save_path, 'wb') as destination:
#             for chunk in file.chunks():
#                 destination.write(chunk)

#         #logger.info(f"File saved successfully at: {save_path}")
#         #print(f"File saved successfully at: {save_path}")

#         file_url = f"{base_dir}/{file.name}"
#         return file_url

#     except Exception as e:
#         logger.error(f"Error uploading file: {e}")
#         return e


#from django.conf import settings
#from django.core.files.storage import default_storage
#import os
#import logging
from uuid import uuid4

#logger = logging.getLogger('upload')

def handle_uploaded_file(file, folder_name, instance_id=None, multiple=False, foreign_id=None, type="image"):
    """Save an uploaded file via Django's default storage and return its path.

    Args:
        file: Uploaded file object (must expose ``content_type`` and ``name``).
        folder_name: Top-level folder (lower-cased) the file is stored under.
        instance_id: Optional id used for the sub-folder and the filename.
        multiple: When True together with ``foreign_id``, group files under the
            foreign object's folder instead of the instance's.
        foreign_id: Id of the owning object when ``multiple`` is used.
        type: "image" enforces an image MIME type and stores under ``img``;
            anything else stores under ``doc``. (Parameter name kept for
            backward compatibility even though it shadows the builtin.)

    Returns:
        The storage-relative path of the saved file, or ``None`` on failure
        (including a non-image upload when ``type == "image"``).
    """
    try:
        if type == "image":
            # Reject anything that is not an image by MIME type.
            if not file.content_type.startswith('image'):
                logger.error("File is not an image.")
                return None
            folder_type = 'img'
        else:
            folder_type = 'doc'

        # Group by foreign object, by instance, or flat under the type folder.
        if multiple and foreign_id:
            base_dir = os.path.join(folder_name.lower(), folder_type, str(foreign_id))
        elif instance_id:
            base_dir = os.path.join(folder_name.lower(), folder_type, str(instance_id))
        else:
            base_dir = os.path.join(folder_name.lower(), folder_type)

        # Unique filename to avoid overwrites (S3 has no real directories).
        ext = os.path.splitext(file.name)[1]
        filename = f"{instance_id or uuid4().hex}{ext}"
        file_path = os.path.join(base_dir, filename)

        # Locally, make sure the target directory exists. Fix: use getattr so
        # a missing USE_S3 setting raises no AttributeError — previously that
        # error was swallowed by the except below and every upload silently
        # returned None.
        if not getattr(settings, 'USE_S3', False):
            full_dir = os.path.join(settings.MEDIA_ROOT, base_dir)
            os.makedirs(full_dir, exist_ok=True)

        # default_storage.save works transparently for both S3 and local disk
        # and returns the actual (possibly de-duplicated) relative path.
        return default_storage.save(file_path, file)

    except Exception as e:
        logger.error(f"Error uploading file: {e}")
        return None


from rest_framework import serializers
from collections import OrderedDict
class OrderedDictField(serializers.Field):
    """Pass-through serializer field that converts Mongo-specific values
    (ObjectId, datetime) into JSON-safe strings on output, recursing into
    nested dicts and lists."""

    def to_representation(self, value):
        """Recursively convert *value* into JSON-serializable primitives.

        ObjectId -> str, datetime -> ISO-8601 string; dicts and lists are
        walked recursively. Any other value is returned unchanged.
        """
        # Fix: accept any dict, not only OrderedDict — plain dicts (the usual
        # PyMongo return type) previously bypassed conversion entirely,
        # leaking raw ObjectId/datetime values. OrderedDict is a dict
        # subclass, so the old behavior is preserved.
        if isinstance(value, dict):
            return {k: self.to_representation(v) for k, v in value.items()}
        if isinstance(value, list):
            return [self.to_representation(v) for v in value]
        if isinstance(value, ObjectId):
            return str(value)
        if isinstance(value, datetime):
            return value.isoformat()
        return value

    def to_internal_value(self, data):
        # Incoming data is stored verbatim; validation is the caller's job.
        return data