from models.compensation import CompensationInput
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from config import Config
from collections import OrderedDict

class CompensationsService:
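    """Service layer for benchmarking a company's compensation data against a market panel.

    Builds salary KPIs, comparison tables, and graph payloads from the
    company_compensation table, filtered by jobs, business sectors, and companies.
    """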

    @staticmethod
    def get_specifique_compensations(filters, company_id):
        """
        Fetch specific compensations based on filters and company ID.
        
        If "jobs", "business_sector", or "companies" is empty, the corresponding condition is omitted from the query.
        
        Args:
            filters (dict): Filters containing "jobs", "business_sector", and "companies".
            company_id (int): ID of the company to include in the query.
        
        Returns:
            pd.DataFrame: DataFrame containing the fetched compensation data.
        """
        try:
            # Extract filters with defaults
            postes = filters.get("jobs", [])
            sectors = filters.get("business_sector", [])
            company_ids = filters.get("companies", [])
            if company_ids:
                company_ids = company_ids + [company_id]

            # Initialize query components
            query_conditions = []

            # Helper function to add IN (...) conditions dynamically.
            # Values are inlined as quoted SQL literals; parameterized queries
            # would be safer if the filter values can be user-supplied.
            def add_in_condition(column_name, values):
                if values:  # Only add the condition if values are not empty
                    escaped = (str(value).replace("'", "''") for value in values)
                    placeholders = ", ".join(f"'{value}'" for value in escaped)
                    query_conditions.append(f"{column_name} IN ({placeholders})")

            add_in_condition("s.businessSectorCode", sectors)
            add_in_condition("c.companyId", company_ids)
            add_in_condition("c.job", postes)
            where_clause = " AND ".join(query_conditions) if query_conditions else "1=1"

            query = f"""
                SELECT c.companyId, c.matricule, c.gross_annual_salary, c.meal_allowance, 
                    c.location_allowance, c.transportation_allowance, c.other_bonuses_awarded_last_year, 
                    c.fixed_allowances, c.total_amount_performance_bonus_awarded, 
                    c.total_amount_sales_bonus_awarded, c.total_amount_sales_commissions_awarded, 
                    c.schooling_allowance, c.seniority_bonus, c.function_code, c.subfunction_code, 
                    c.collaborative_path, c.grade, c.country, c.city, c.site, c.department, c.internal_grade, c.gender, c.job, c.function
                FROM company_compensation c
                LEFT JOIN company s ON c.companyId = s.id
                WHERE {where_clause};
            """

            # Execute the query
            with Config.get_db_connection() as connection:
                result = pd.read_sql_query(query, connection)

            return result

        except Exception as e:
            raise RuntimeError(f"Error fetching specific compensations: {e}") from e

    # Calculations
    @staticmethod
    def calculate_salary_statistics(df, var):
        """
        Calculate salary statistics for the given variable (column) in the DataFrame.

        Parameters:
        - df: The DataFrame containing the data for the group.
        - var: The column name for salary data.

        Returns:
        - A dictionary of calculated salary statistics.
        """
        
        # Extract the salary column as a Series
        salaries = df[var]

        # Central tendency (mean/median skip NaN, unlike sum()/len())
        num_salaries = len(salaries)
        average_salary = salaries.mean()
        median_salary = salaries.median()

        # Min, max, and standard deviation (computed but not currently returned)
        min_salary = salaries.min()
        max_salary = salaries.max()
        std_dev_salary = salaries.std()

        # Percentiles
        percentile_25 = salaries.quantile(0.25)
        percentile_75 = salaries.quantile(0.75)

        return {
            #'total_salaries': total_salary,
            f'{var}_average': average_salary,
            'number_of_incumbents': num_salaries,
            f'{var}_25': percentile_25,
            f'{var}_50': median_salary,
            f'{var}_75': percentile_75,
            #f'{var}_std_dev': std_dev_salary,
            #f'{var}_min': min_salary,
            #f'{var}_max': max_salary,
        }

    @staticmethod
    def calculate_base_salary_diff(var,data):
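        """
        Add percentage-gap columns comparing the company's value (`<var>_your_data`)
        to the market percentile columns (`<var>_25`, `<var>_50`, `<var>_75`).

        Returns the same DataFrame with `<var>_diff_25_perc`, `<var>_diff_50_perc`,
        and `<var>_diff_75_perc` added.
        """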
      
        # Percentage gap between the company's value and each market percentile
        data[var + '_diff_25_perc'] = 100 * (data[var + '_your_data'] - data[var + '_25']) / data[var + '_25']
        data[var + '_diff_50_perc'] = 100 * (data[var + '_your_data'] - data[var + '_50']) / data[var + '_50']
        data[var + '_diff_75_perc'] = 100 * (data[var + '_your_data'] - data[var + '_75']) / data[var + '_75']

        return data
        
    @staticmethod
    def calculate_display_var(vars, data, condition):
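        """
        For each column in `vars`, aggregate its unique values per `condition` group
        into a comma-separated string, and return the result as a DataFrame.
        """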
        output=pd.DataFrame()
        for var in vars:

            output[var]=data.groupby(condition)[var].agg(lambda x: ", ".join(x.unique()))

        return output.reset_index()
        
    @staticmethod
    def get_compensation_general_graph(general_graphcode, companyId, compensations, round):
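        """
        Build the company-wide graph payloads requested in `general_graphcode`.

        Supported codes: 'gap_general' (company vs. panel medians for fixed and total
        gross salary) and 'general_internal_gap_fixed' (internal min/max/median ranges).
        `round` is the number of decimals used when formatting amounts.
        """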
        results = []
        company_data = compensations[compensations["companyId"]==companyId]
        # Calculate data only for requested graphs
        if 'gap_general' in general_graphcode:
            # Calculate Average Salary metrics
            Brut_fixe_societe_median = company_data["base_salary"].median()
            Brut_fixe_gi_median = compensations["base_salary"].median()
            Brut_fixe_variation = (Brut_fixe_societe_median/Brut_fixe_gi_median-1) if pd.notna(Brut_fixe_societe_median/Brut_fixe_gi_median-1) else 0 
            
            Brut_total_societe_median = company_data["total_compensation"].median()
            Brut_total_gi_median = compensations["total_compensation"].median()
            Brut_total_variation = (Brut_total_societe_median/Brut_total_gi_median -1) if pd.notna(Brut_total_societe_median/Brut_total_gi_median -1) else 0
            Brut_total_societe_median = Brut_total_societe_median if pd.notna(Brut_total_societe_median) else 0
            Brut_fixe_societe_median = Brut_fixe_societe_median if pd.notna(Brut_fixe_societe_median) else 0


            gap_general_data =  {
                    'name': 'gap_general', 
                    'data': [
                        {
                            "a_salary": "Brut Fixe", 
                            "b_society_median": f"{Brut_fixe_societe_median:,.{round}f}".replace(',', ' '),
                            "c_gap":"{:.2%}".format(Brut_fixe_variation),
                            "d_market_median": f"{Brut_fixe_gi_median:,.{round}f}".replace(',', ' '),
                          
                        },
                        {
                            "a_salary": "Brut Total", 
                            "b_society_median": f"{Brut_total_societe_median:,.{round}f}".replace(',', ' '),
                            "c_gap": "{:.2%}".format(Brut_total_variation),
                            "d_market_median": f"{Brut_total_gi_median:,.{round}f}".replace(',', ' ')
                            
                        }
                    ]
                }
            results.append(gap_general_data)
        
        if 'general_internal_gap_fixed' in general_graphcode:
            # Calculate salary range metrics
            Brut_fixe_societe_median = company_data["base_salary"].median()

            Brut_total_societe_median = company_data["total_compensation"].median()
            fixed_base_salary_max = company_data["base_salary"].max()
            fixed_base_salary_max_perc = (fixed_base_salary_max/Brut_fixe_societe_median-1) if pd.notna(fixed_base_salary_max/Brut_fixe_societe_median-1) else 0  
            fixed_base_salary_min = company_data["base_salary"].min() if pd.notna(company_data["base_salary"].min()) else 0   
            fixed_base_salary_min_perc =  (fixed_base_salary_min/Brut_fixe_societe_median-1) if pd.notna( fixed_base_salary_min/Brut_fixe_societe_median-1) else 0  
            fixed_range = (1-fixed_base_salary_min/fixed_base_salary_max) if pd.notna(1-fixed_base_salary_min/fixed_base_salary_max) else 0   
            
            total_base_salary_max = company_data["total_compensation"].max()
            total_base_salary_max_perc = (total_base_salary_max/Brut_total_societe_median-1) if pd.notna(total_base_salary_max/Brut_total_societe_median-1) else 0  
            total_base_salary_min =company_data["total_compensation"].min() if pd.notna(company_data["total_compensation"].min()) else 0  
            total_base_salary_min_perc = (total_base_salary_min/Brut_total_societe_median-1) if pd.notna(total_base_salary_min/Brut_total_societe_median-1) else 0  
            total_range = (1-total_base_salary_min/total_base_salary_max) if pd.notna(1-total_base_salary_min/total_base_salary_max) else 0  
            
            Brut_total_societe_median = Brut_total_societe_median if pd.notna(Brut_total_societe_median) else 0
            Brut_fixe_societe_median = Brut_fixe_societe_median if pd.notna(Brut_fixe_societe_median) else 0
            fixed_base_salary_max = fixed_base_salary_max if pd.notna(fixed_base_salary_max) else 0
            total_base_salary_max = total_base_salary_max if pd.notna(total_base_salary_max) else 0
            
            internal_gap_data = {
                    'name': 'general_internal_gap_fixed', 
                    'data': [{
                      "fixed_median": f"{Brut_fixe_societe_median:,.{round}f}".replace(',', ' '),
                        "fixed_base_salary_max": f"{fixed_base_salary_max:,.{round}f}".replace(',', ' '),
                        "fixed_base_salary_max_perc": "{:.2%}".format(fixed_base_salary_max_perc),
                        "fixed_base_salary_min": f"{fixed_base_salary_min:,.{round}f}".replace(',', ' '),
                        "fixed_base_salary_min_perc": "{:.2%}".format(fixed_base_salary_min_perc),
                        "fixed_range": "{:.2%}".format(fixed_range)}, 
                        {"total_median": f"{Brut_total_societe_median:,.{round}f}".replace(',', ' '),
                        "total_base_salary_max": f"{total_base_salary_max:,.{round}f}".replace(',', ' '),
                        "total_base_salary_max_perc": "{:.2%}".format(total_base_salary_max_perc),
                        "total_base_salary_min":  f"{total_base_salary_min:,.{round}f}".replace(',', ' '),
                        "total_base_salary_min_perc": "{:.2%}".format(total_base_salary_min_perc),
                        "total_range": "{:.2%}".format(total_range)
                    }]
                }
            results.append(internal_gap_data)
        
        # Graph codes other than 'gap_general' and 'general_internal_gap_fixed' are silently ignored
        
        return results

    @staticmethod
    def get_compensation_graph_by_criteria(specifique_graphcode, condition, companyId, compensations, round):
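        """
        Build per-criteria graph payloads ('repartition_gap', 'repartition_data_bf',
        'repartition_data_bt') comparing the company's average salaries to the panel,
        grouped by `condition`. Unknown codes yield an explanatory string.
        """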
        # Filter data for the specified company once (for efficiency)
        company_data = compensations[compensations["companyId"] == companyId]

        # Determine which graphs were requested
        required_graphs = set(specifique_graphcode)
        results = {
            'repartition_gap': 0,
            'repartition_data_bf': 0,
            'repartition_data_bt': 0
        }
        
        # Average salaries per group: company vs. the whole panel, merged on the
        # grouping keys so company groups align with panel groups correctly
        grouped_compensation = compensations.groupby(condition)["base_salary"].count().reset_index()
        company_means = company_data.groupby(condition)[["base_salary", "total_compensation"]].mean().reset_index()
        company_means = company_means.rename(columns={"base_salary": "company_based_salary", "total_compensation": "company_total_compensation"})
        panel_means = compensations.groupby(condition)[["base_salary", "total_compensation"]].mean().reset_index()
        panel_means = panel_means.rename(columns={"base_salary": "others_based_salary", "total_compensation": "others_total_compensation"})
        grouped_compensation = grouped_compensation.merge(company_means, on=condition, how="left").merge(panel_means, on=condition, how="left")
        grouped_compensation["fixe_brut_ecart"] = grouped_compensation["company_based_salary"] / grouped_compensation["others_based_salary"] - 1
        grouped_compensation["total_brut_ecart"] = grouped_compensation["company_total_compensation"] / grouped_compensation["others_total_compensation"] - 1

        # --- Compute only what's needed ---
        if 'repartition_gap' in required_graphs:

            # Mean positive and negative gaps (0 when a side has no observations)
            pos_fixe = grouped_compensation.loc[grouped_compensation["fixe_brut_ecart"] >= 0, "fixe_brut_ecart"].mean()
            neg_fixe = grouped_compensation.loc[grouped_compensation["fixe_brut_ecart"] <= 0, "fixe_brut_ecart"].mean()
            pos_total = grouped_compensation.loc[grouped_compensation["total_brut_ecart"] >= 0, "total_brut_ecart"].mean()
            neg_total = grouped_compensation.loc[grouped_compensation["total_brut_ecart"] <= 0, "total_brut_ecart"].mean()
            pos_fixe, neg_fixe, pos_total, neg_total = (
                x if pd.notna(x) else 0 for x in (pos_fixe, neg_fixe, pos_total, neg_total)
            )
            
            results['repartition_gap'] = {
                'name':"repartition_gap",
                'data': [
                    {"a_market_spread": "Positif", "b_fixed_gross": "{:.2%}".format(pos_fixe), "c_total_gross": "{:.2%}".format(pos_total)},
                    {"a_market_spread": "Négatif", "b_fixed_gross": "{:.2%}".format(neg_fixe), "c_total_gross": "{:.2%}".format(neg_total)}
                ]
            }

        if 'repartition_data_bf' in required_graphs:
            
            # Share of positions with a positive vs. negative fixed-salary gap
            base_var = abs(grouped_compensation["fixe_brut_ecart"]).sum()
            if base_var == 0:  # avoid division by zero when there is no gap at all
                base_var = 1

            pos_share = grouped_compensation.loc[grouped_compensation["fixe_brut_ecart"] >= 0, "fixe_brut_ecart"].sum() / base_var
            neg_share = abs(grouped_compensation.loc[grouped_compensation["fixe_brut_ecart"] <= 0, "fixe_brut_ecart"]).sum() / base_var
            pos_share = pos_share if pd.notna(pos_share) else 0
            neg_share = neg_share if pd.notna(neg_share) else 0
            
            results['repartition_data_bf'] = {
                'name': 'repartition_data_bf',
                'data': [
                    {"gap_type": "Positif", "positions_percentage": "{:.2%}".format(pos_share)},
                    {"gap_type": "Négatif", "positions_percentage": "{:.2%}".format(neg_share)}
                ]
            }

        if 'repartition_data_bt' in required_graphs:

            # Share of positions with a positive vs. negative total-compensation gap
            total_var = abs(grouped_compensation["total_brut_ecart"]).sum()
            if total_var == 0:  # avoid division by zero when there is no gap at all
                total_var = 1

            pos_share = grouped_compensation.loc[grouped_compensation["total_brut_ecart"] >= 0, "total_brut_ecart"].sum() / total_var
            neg_share = abs(grouped_compensation.loc[grouped_compensation["total_brut_ecart"] <= 0, "total_brut_ecart"]).sum() / total_var
            pos_share = pos_share if pd.notna(pos_share) else 0
            neg_share = neg_share if pd.notna(neg_share) else 0
            
            results['repartition_data_bt'] = {
                'name':'repartition_data_bt',
                'data': [
                    {"gap_type": "Positif", "positions_percentage": "{:.2%}".format(pos_share)},
                    {"gap_type": "Négatif", "positions_percentage": "{:.2%}".format(neg_share)}
                ]
            }
        # Return only the requested graphs (unknown codes yield an explanatory string)
        return [results.get(code, f"{code} is unknown") for code in specifique_graphcode]
        
    @staticmethod
    def create_best_match_df(df, companyId, condition, column):
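        """
        Build a "best match" dataset: company rows are matched to other panel rows on
        progressively coarser key combinations (the starting level depends on
        len(condition)), and a 'match' column holding the key tuple is added.
        Company rows that never match are kept as-is in the result.
        """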
        # Matching keys, from most to least specific
        match_levels = [
            ["function_code", "subfunction_code", "collaborative_path", "grade"],
            ["function_code", "subfunction_code", "grade"],
            ["function_code", "grade"],
            ["grade"],
        ]
        n = len(condition)
        match = []
        for i in range(len(match_levels) - n, len(match_levels)):
            df['match'] = df[match_levels[i]].apply(tuple, axis=1)
            df_company=df[df["companyId"] == companyId]
            if len(df_company)==0:
                break

            df_other=df[df["companyId"] != companyId]
            
            match_df=df_other[df_other['match'].isin(df_company['match'])]
                    
            # Company rows matched at this level are removed from further passes
            found = df_company[df_company['match'].isin(match_df['match'])]
            matched_data = df[df['match'].isin(match_df['match'])]

            df = df.drop(found.index)
            match.append(matched_data)
        
        # Keep any company rows that never found a market match
        if len(df_company) != 0:
            df_company = df[df['companyId'] == companyId]

        match.append(df_company)

        best_match_df = pd.concat(match, ignore_index=True)
        
        return best_match_df

    @staticmethod
    def buildComparisonDatasets(kpis, company_df, condition1):
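        """
        Append the company's median base, total, and global compensation per
        `condition1` group to the panel KPIs, then add the percentage gaps against
        the panel percentiles.
        """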
        base_salary_your_data = company_df.groupby(condition1)['base_salary'].median()
        base_salary_your_data = pd.DataFrame(base_salary_your_data.reset_index().rename(columns={"base_salary": "base_salary_your_data"}))

        total_compensation_your_data = company_df.groupby(condition1)['total_compensation'].median()
        total_compensation_your_data = pd.DataFrame(total_compensation_your_data.reset_index().rename(columns={"total_compensation": "total_compensation_your_data"}))

        global_compensation_your_data = company_df.groupby(condition1)['global_compensation'].median()
        global_compensation_your_data = pd.DataFrame(global_compensation_your_data.reset_index().rename(columns={"global_compensation": "global_compensation_your_data"}))

        res = pd.concat([base_salary_your_data, total_compensation_your_data, global_compensation_your_data], axis=1)
        res = res.loc[:, ~res.columns.duplicated()]

        res = pd.merge(kpis, res, on=condition1, how="left")

        base_salary_diff = CompensationsService.calculate_base_salary_diff("base_salary", res)
        total_compensation_diff = CompensationsService.calculate_base_salary_diff("total_compensation", res)
        global_compensation_diff = CompensationsService.calculate_base_salary_diff("global_compensation", res)

        CompareKPIs = pd.concat([res, base_salary_diff, total_compensation_diff, global_compensation_diff], axis=1)
        CompareKPIs = CompareKPIs.loc[:, ~CompareKPIs.columns.duplicated()].replace(np.nan, 0)
        return CompareKPIs
            
    @staticmethod
    def buildKpisDatasets(compensations_extraction, include, condition, companyId):
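        """
        Compute salary statistics (headcount, mean, 25th/50th/75th percentiles) per
        `condition` group for base, total, and global compensation. When `include`
        is False, the company's own records are excluded from the benchmark panel.
        """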
        if not include:
            # Exclude the company's own records from the benchmark panel
            compensations = compensations_extraction[compensations_extraction["companyId"] != companyId]
        else:
            compensations = compensations_extraction
        base_salary = compensations.groupby(condition).apply(lambda group:CompensationsService.calculate_salary_statistics(group, "base_salary")).reset_index()
        base_salary = pd.concat([base_salary[condition], pd.json_normalize(base_salary[base_salary.columns[-1]])], axis=1)

        total_compensation = compensations.groupby(condition).apply(lambda group: CompensationsService.calculate_salary_statistics(group, "total_compensation")).reset_index()
        total_compensation = pd.concat([total_compensation[condition], pd.json_normalize(total_compensation[total_compensation.columns[-1]])], axis=1)

        global_compensation = compensations.groupby(condition).apply(lambda group: CompensationsService.calculate_salary_statistics(group, "global_compensation")).reset_index()
        global_compensation = pd.concat([global_compensation[condition], pd.json_normalize(global_compensation[global_compensation.columns[-1]])], axis=1)

        kpis = pd.concat([base_salary, total_compensation, global_compensation], axis=1)
        kpis = kpis.loc[:, ~kpis.columns.duplicated()]
        return kpis
    
    # Output building
    @staticmethod
    def get_compensation_output_by_criteria(compensations_extraction, companyId, criterias, round, include, compare):
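        """
        Build the per-criteria output: KPI tables (optionally with company comparison
        and best matching), display columns, and the requested graph payloads.
        Returns a list with one dict per criteria, each containing 'criteria',
        'table_data', and 'graph_data'.
        """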
          
        Output = []
        for criteria in criterias:

            cond = criteria.get("condition")
            specifique_graphcode = criteria.get("graph")
            column_associe_condition = criteria.get("column_associe_condition")
            column_other_display = criteria.get("column_other_display")
            best_matching=criteria.get("best_matching")
            
            # Create the best-matching dataset when requested
            if best_matching:
                compensations_extraction = CompensationsService.create_best_match_df(compensations_extraction, companyId, cond, column_associe_condition)
                condition = ['match']
            else:
                condition = cond
            
            # select company data
            company_df=compensations_extraction[compensations_extraction["companyId"] == companyId]

            #get graphics by criteria
            specifique_graph_output = CompensationsService.get_compensation_graph_by_criteria(specifique_graphcode, condition, companyId, compensations_extraction, round)
            
            # Calculate standard KPIs
            KPIs_Result = CompensationsService.buildKpisDatasets(compensations_extraction, include, condition, companyId)
            result = KPIs_Result

            # Define the comparison condition: if column_associe_condition is not empty,
            # use it instead of the criteria condition and add that column to the output
            if column_associe_condition == []:
                condition1 = condition
            else:
                condition1 = column_associe_condition
                result[condition1[0]] = pd.merge(KPIs_Result, company_df.groupby(condition)[condition1[0]].unique(), on=condition, how="right")[condition1[0]]
                result = result.dropna(subset=[condition1[0]])
                result = result.explode(condition1[0]).reset_index(drop=True)
            
            if compare:
                result = CompensationsService.buildComparisonDatasets(result, company_df, condition1)

            # Add the criteria columns to the output
            if best_matching or column_associe_condition != []:
                result = pd.merge(result, company_df[cond + condition], on=condition, how="right")
            
            # The count columns are grouped on the match keys in the best-matching case,
            # otherwise on the comparison condition.
            if best_matching:
                grouping = condition
            else:
                grouping = condition1
        
            result ["number_organisation"]=pd.merge(result, compensations_extraction.groupby(grouping)['companyId'].nunique().reset_index(), on=grouping, how="left")['companyId']
            result ["number_employee_panel"]=pd.merge(result, compensations_extraction.groupby(grouping)['base_salary'].count().reset_index(), on=grouping, how="left")['base_salary']
            result ["number_employee_company"]=pd.merge(result, company_df.groupby(grouping)['base_salary'].count().reset_index(), on=grouping, how="left")['base_salary'].replace(np.nan, 0)
            result = result[result["number_employee_panel"] > result["number_employee_company"]]
            result = result[result["number_employee_panel"] > 1]
            result =  result[result[cond+condition1].apply(tuple, axis=1).isin(company_df[cond+condition1].apply(tuple, axis=1))]
            
            # Add company rows that found no match back into the final output
            if best_matching:
                unmatched = company_df[~company_df[column_associe_condition[0]].isin(result[column_associe_condition[0]])][column_associe_condition[0]]
                result = pd.concat([result, unmatched], axis=0).replace(np.nan, 0)
            
            # Calculate display variables
            if column_other_display != []:
                display_var = CompensationsService.calculate_display_var(column_other_display, company_df, condition1)
                result = pd.merge(result, display_var, on=condition1, how="left")
        
            result = result.drop_duplicates()
            
            #formatting output by criteria
            result = result.rename(columns={col: f"a_{col}" for col in cond+condition1+column_other_display})
            prefixed_condition_cols = [f"a_{col}" for col in cond]
            float_cols =[
            'base_salary_average', 'base_salary_25', 'base_salary_50', 'base_salary_75',
            'base_salary_std_dev', 'base_salary_your_data',
            'base_salary_diff_25_perc', 'base_salary_diff_50_perc', 'base_salary_diff_75_perc',

            'total_compensation_average', 'total_compensation_25', 'total_compensation_50',
            'total_compensation_75', 'total_compensation_std_dev', 'total_compensation_your_data',
            'total_compensation_diff_25_perc', 'total_compensation_diff_50_perc', 'total_compensation_diff_75_perc',

            'global_compensation_average', 'global_compensation_25', 'global_compensation_50',
            'global_compensation_75', 'global_compensation_std_dev', 'global_compensation_your_data',
            'global_compensation_diff_25_perc', 'global_compensation_diff_50_perc', 'global_compensation_diff_75_perc'
            ]
            
            desired_order = [
                'a_matricule',
            'a_function_code', 'a_subfunction_code', 'a_collaborative_path', 'a_grade','match',
            'a_country', 'a_city', 'a_site', 'a_department','a_job', 'a_function','a_internal_grade', 'a_gender',
            'number_organisation','number_employee_panel','number_employee_company'] + float_cols
            int_col = [ 'a_grade','number_organisation','number_employee_panel','number_employee_company']
            int_col = [col for col in int_col if col in result.columns]
            # Reorder columns - only include columns that exist in the result
            existing_columns = [col for col in desired_order if col in result.columns]
            result = result[existing_columns]
            result[int_col] = result[int_col].applymap(lambda x: f"{x:,.0f}".replace(",", " "))
            float_columns = [col for col in float_cols if col in result.columns]
            result[float_columns] = result[float_columns].applymap(lambda x: f"{x:,.{round}f}".replace(",", " "))
            # Fill numeric columns with 0
            result[result.select_dtypes(include='number').columns] = \
                result.select_dtypes(include='number').fillna(0)
            # Fill object/string columns with 'Missing'
            result[result.select_dtypes(include='object').columns] = \
                result.select_dtypes(include='object').fillna('Missing')

            output = {
                "criteria": prefixed_condition_cols,
                "table_data": result.to_dict(orient="records"),
                "graph_data": specifique_graph_output
            }
            Output.append(output)

        return Output
    
    @staticmethod
    def BuildFinalResponse(general_graphcode, filters, companyId, criteria, round, include, compare):
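        """
        Top-level entry point: fetch compensations for the given filters, derive the
        base/total/global compensation columns, and return {'graphs': ..., 'tables': ...}.
        Returns the string "No compensations data" when the extraction is empty.
        """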
        compensations_extraction = CompensationsService.get_specifique_compensations(filters, companyId)
        
        if compensations_extraction.empty:
            return "No compensations data"
        else:
            compensations_extraction.rename(columns={"gross_annual_salary": "base_salary"}, inplace=True)
            compensations_extraction["total_compensation"] = compensations_extraction[['base_salary', 'total_amount_performance_bonus_awarded',
                                                                        'total_amount_sales_bonus_awarded', 'total_amount_sales_commissions_awarded']].sum(axis=1)

            compensations_extraction["global_compensation"] = compensations_extraction[['base_salary', 'meal_allowance', 'location_allowance', 'transportation_allowance','other_bonuses_awarded_last_year', 'fixed_allowances', 'total_amount_performance_bonus_awarded',
                                                                        'total_amount_sales_bonus_awarded', 'total_amount_sales_commissions_awarded','schooling_allowance', 'seniority_bonus']].sum(axis=1)

        output_by_criteria = CompensationsService.get_compensation_output_by_criteria(compensations_extraction, companyId, criteria, round, include, compare)

        graphs = CompensationsService.get_compensation_general_graph(general_graphcode, companyId, compensations_extraction, round)

        final_output = {"graphs": graphs, "tables": output_by_criteria}
        
        return final_output
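
# Illustrative usage (a sketch only; the filter values, criteria values, and IDs below
# are assumptions for demonstration, not fixed values — only the dictionary keys and
# graph codes come from this module):
#
#     filters = {"jobs": ["Data Analyst"], "business_sector": ["BANK"], "companies": [12, 15]}
#     criteria = [{
#         "condition": ["function_code", "grade"],
#         "graph": ["repartition_gap", "repartition_data_bf"],
#         "column_associe_condition": [],
#         "column_other_display": [],
#         "best_matching": False,
#     }]
#     response = CompensationsService.BuildFinalResponse(
#         general_graphcode=["gap_general"],
#         filters=filters,
#         companyId=1,
#         criteria=criteria,
#         round=0,
#         include=True,
#         compare=True,
#     )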
