import numpy as np
import scipy.ndimage as nd
import plotly.graph_objects as go
from colorsys import hls_to_rgb
from colorsys import hsv_to_rgb
import plotly.io as pio
import os 
from math import ceil,floor
os.chdir(os.path.dirname(os.path.abspath(__file__)))
from collections import defaultdict



#We use the wool package to compute periodic boundary conditions, available here: https://github.com/alwinm/wool/tree/master
#The following functions are copied from this package for simplicity
def connect_faces(lf1,lf2,struct):

    stack = np.zeros([2]+list(lf1.shape))
    stack[0] = lf1
    stack[1] = lf2
    label,things = nd.label(stack > 0,structure=struct)
    if things == 0:
        return set()
    slc = nd.labeled_comprehension(stack,label,range(1,things+1),
                                   lambda a: list(set(a)),
                                   list,
                                   None,
                                   pass_positions=False)
    tuples = []
    for region in slc:
        if len(region) == 0:
            continue
        owner = np.min(region)
        for cell in region:
            if cell != owner:
                tuples += [(owner,cell)]
    return set(tuples)

def periodic(label,axis,struct):
    dim = label.ndim
    size = label.shape[axis]
    select1 = [slice(None)]*dim
    select2 = [slice(None)]*dim
    select1[axis] = 0
    select2[axis] = size-1
    lf1 = label[tuple(select1)]
    lf2 = label[tuple(select2)]
    return connect_faces(lf1,lf2,struct)


def clean_tuples(tuples):
    return sorted(set([(min(pair),max(pair)) for pair in tuples]))

def merge_tuples_unionfind(tuples):
    # use the classic union-find algorithm with path compression
    # https://en.wikipedia.org/wiki/Disjoint-set_data_structure
    parent_dict = {}

    def subfind(x):
        # update roots while visiting parents 
        if parent_dict[x] != x:
            parent_dict[x] = subfind(parent_dict[x])
        return parent_dict[x]

    def find(x):
        if x not in parent_dict:
            # x forms new set and becomes a root
            parent_dict[x] = x
            return x
        if parent_dict[x] != x:
            # follow chain of parents of parents to find root 
            parent_dict[x] = subfind(parent_dict[x])
        return parent_dict[x]

    # each tuple represents a connection between two items 
    # so merge them by setting root to be the lower root. 
    for p0,p1 in list(tuples):
        r0 = find(p0)
        r1 = find(p1)
        if r0 < r1:
            parent_dict[r1] = r0
        elif r1 < r0:
            parent_dict[r0] = r1

    # for unique parents, subfind the root, replace occurrences with root
    vs = set(parent_dict.values())
    for parent in vs:
        sp = subfind(parent)
        if sp != parent:
            for key in parent_dict:
                if parent_dict[key] == parent:
                    parent_dict[key] = sp

    return parent_dict
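
#A small worked example of the merging above, run as a cheap sanity check at import time:
#the pairs (1,2) and (2,3) collapse onto the root 1, while (5,6) keeps the root 5.
assert merge_tuples_unionfind([(1,2),(2,3),(5,6)]) == {1:1, 2:1, 3:1, 5:5, 6:5}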



def make_dict(mask,struct,boundary,bargs):
    label,things = nd.label(mask,structure=struct)
    cs = clean_tuples(boundary(label,bargs,struct))
    slc = nd.labeled_comprehension(mask,label,range(1,things+1),
                                   lambda a,b: b,
                                   list,
                                   None,
                                   pass_positions=True)
    outdict = dict(zip(range(1,things+1),slc))
    ownerof = merge_tuples_unionfind(cs)
    for key in ownerof:
        if key != ownerof[key]:
            # add key to its owner and remove key
            outdict[ownerof[key]] = np.append(outdict[ownerof[key]],outdict[key])
            outdict.pop(key)
    return outdict,ownerof


def boundaries(label,cell_shear,struc):
    #A useful function to compute periodic boundaries with the wool package
    connectset = set()
    connectset = connectset.union(periodic(label,0,struc))
    connectset = connectset.union(periodic(label,1,struc))
    connectset = connectset.union(periodic(label,2,struc))
    return connectset



def all_visible_points(Clusters, N, s):
    #Find all points of Clusters that are visible from the viewpoint (roughly (N, N, N)). The goal is to only display these points when the viewpoint is fixed, to improve performance.
    #s represents the size of the marker drawn around each point of Clusters, so that any point hidden behind another marker can be removed. 
    points = np.argwhere(Clusters != 0)
    viewpoint = np.array([1.25*N, 1.25*N, 1.25*N+10])#We slightly shift the viewpoint for better results.
    
    # We calculate direction vectors from the viewpoint to each point
    direction_vectors = points - viewpoint
    
    # And then the depth of each point as the distance along the direction vector
    depths = np.linalg.norm(direction_vectors, axis=1)
    
    normal_vector = viewpoint / np.linalg.norm(viewpoint)
    
    # We find two vectors orthogonal to the normal vector via Gram-Schmidt
    up_vector = np.array([0, 0, 1]) if normal_vector[2] == 0 else np.array([1, 0, 0])
    u = up_vector - np.dot(up_vector, normal_vector) * normal_vector
    u = u / np.linalg.norm(u)
    v = np.cross(normal_vector, u)
    
    # Project points onto the plane orthogonal to the direction vector (N, N, N)
    projection_matrix = np.array([u, v]).T
    projected_points = np.dot(points, projection_matrix)
    
    # We will use a dictionary to store the minimum depth and corresponding point for each cell
    depth_grid = defaultdict(lambda: (np.inf, None))
   
    # Calculate cell indices for each point
    cell_indices = ((projected_points + N) // s).astype(int)  # shift to positive range
   
    for i, (x_idx, y_idx) in enumerate(cell_indices):
       current_depth, _ = depth_grid[(x_idx, y_idx)]
       if depths[i] < current_depth:
           depth_grid[(x_idx, y_idx)] = (depths[i], points[i])
   
    # Gather the visible points from the grid
    visible_points = np.array([point for _, point in depth_grid.values() if point is not None])
    
    return visible_points
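
#Illustrative usage sketch for the function above (hypothetical helper, not called anywhere below):
#sample a Bernoulli site configuration and keep only the points facing the camera, using projected grid cells of size 0.1.
def _example_visible_points(N=40, p=0.3, seed=0):
    rng = np.random.default_rng(seed)
    config = (rng.random((N, N, N)) < p).astype(int)
    return all_visible_points(config, N, 0.1)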



#We now turn to computing the clusters of a 3D percolation configuration. 
def Clusters_Figure(N,Perco,Previous_color,Previous_Clusters,percent,boundary,site,HTML):
        #The output is a plotly figure of those clusters of Perco, in a cube of size N, whose size is larger than (percent)/100 times the largest cluster size. If percent is negative, we plot only the (-percent) largest clusters instead. 
        #We also output the corresponding scatter plot, the colors of the clusters and their indices, which we will put back as input in the next iteration.
        #If boundary=0 then the clusters have periodic boundary conditions, and otherwise we use free boundary conditions.
        #If site=1, then Perco is a site percolation model, otherwise it is a bond percolation model.
        #Previous_Clusters and Previous_color are respectively the cluster labels and colors at the previous iteration, so that the random colors stay consistent from one iteration to the next. At the first iteration they can be taken to be np.ones((N,N,N),int) and np.array([0.06]) (blue) respectively.
        #For site percolation models, Perco is simply an array of size (N,N,N) representing the open vertices.
        #For bond percolation models, Perco is an array of size (2N,2N,2N), such that for x=(i,j,k), the edge between x and x+e_1 is open if and only if the field at (2*i+1,2*j,2*k) is equal to 1. Similarly for the other directions. 
        #If HTML=1 we plot all points of all the clusters, for a good 3D visualization in HTML files. Otherwise we assume the viewpoint is fixed (for instance in an image or video), and only represent the points that are visible from the camera. 


        #In case of bond percolation we set every even site (i.e. every vertex) to 1, so that vertices joined by open edges end up in the same labeled cluster.
        if site!=1:
            Perco[::2, ::2,::2]=np.ones((N,N,N))
     
        #Now, we label the clusters on the box without adjusting for connectedness at the boundaries.
        Unadjusted_Clusters = nd.label(Perco)[0]
        
        if boundary==0:
            #Merge Clusters connected at the boundaries using the wool package.
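            #This structuring element corresponds to 6-connectivity in 3D (nearest neighbours only), i.e. nd.generate_binary_structure(3,1).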
            struc=[[[0,0,0],[0,1,0],[0,0,0]],[[0,1,0],[1,1,1],[0,1,0]],[[0,0,0],[0,1,0],[0,0,0]]]
            od,ownerof = make_dict(Unadjusted_Clusters,struc,boundaries,1)
            Unadjusted_Clusters_temp = np.zeros(np.prod(Unadjusted_Clusters.shape))
            for key in od:
                    Unadjusted_Clusters_temp[od[key]] = key
            Unadjusted_Clusters = Unadjusted_Clusters_temp.reshape(Unadjusted_Clusters.shape)
                    
        # In case of bond percolation, we remove the edges to just keep the clusters of vertices which are connected by open edges in Perco.
        if site!=1:
            Adjusted_Clusters=Unadjusted_Clusters[::2, ::2,::2]
        else:
            Adjusted_Clusters=Unadjusted_Clusters

        #We now compute the cluster sizes and corresponding labels
        Cluster_Labels, Cluster_Sizes = np.unique(Adjusted_Clusters, return_counts=True)           
        Cluster_Sizes = Cluster_Sizes[1:]
        Cluster_Labels = Cluster_Labels[1:]
        Cluster_Sizes_Order=np.sort(Cluster_Sizes)
        Cluster_Sizes_Order_Label=np.argsort(Cluster_Sizes)
        Cluster_Labels_Order=Cluster_Labels[Cluster_Sizes_Order_Label]

        Adjusted_Clusters_Largest=np.zeros((N,N,N),int)
        if percent>=0:
            #We only keep the clusters with size at least percent/100 of the biggest.
            First_Large_Cluster=next(x for x,y in enumerate(Cluster_Sizes_Order) if y>percent*Cluster_Sizes_Order[-1]/100)
            number_clusters=len(Cluster_Sizes)-First_Large_Cluster
            for i in np.arange(First_Large_Cluster,len(Cluster_Sizes)):
                Adjusted_Clusters_Largest[Adjusted_Clusters==Cluster_Labels_Order[i]]=len(Cluster_Sizes)-i
        else:
            #We only keep the (-percent) largest clusters, or less if there are not enough clusters.
            number_clusters=np.minimum(len(Cluster_Labels_Order),-int(percent))
            for i in np.arange(1,number_clusters+1):
                Adjusted_Clusters_Largest[Adjusted_Clusters==Cluster_Labels_Order[-i]]=i
            
            
        if percent>=0:
            #When percent>=0 we now identify, for each cluster C of Previous_Clusters, the biggest cluster of Perco that is included in C.
            #The goal is to keep the colors consistent: if C had a certain color in the previous picture, we want the biggest cluster included in C in the current picture to keep that color (by convention the probability of being open decreases at each step, so every current cluster is contained in a cluster of the previous picture).
            #The following variables record respectively the size and the label of the biggest cluster of Perco inside each cluster of Previous_Cluster
            Max_Cluster_Previous=np.zeros(len(Previous_color))
            Max_Cluster_Previous_Label=np.zeros(len(Previous_color))
            #For each cluster of Perco, we also record the color of the corresponding cluster of Previous_Clusters, stored in the variable Previous_color.
            Color=np.zeros(number_clusters)
            for i in np.arange(number_clusters):
                #k is an arbitrarily chosen index in the box at which the (i+1)-th largest cluster of Perco can be found
                k=next(x for x,y in np.ndenumerate(Adjusted_Clusters_Largest) if y==i+1)
                #S is the size of the (i+1)-th largest cluster
                S=Cluster_Sizes_Order[-i-1]
                if Previous_Clusters[k[0],k[1],k[2]]!=0:
                    Color[i]=Previous_color[Previous_Clusters[k[0],k[1],k[2]]-1]
                    #We now test if cluster i is bigger than all previously tested clusters which correspond to the same cluster of Previous_Clusters
                    if S>Max_Cluster_Previous[Previous_Clusters[k[0],k[1],k[2]]-1]:
                        Max_Cluster_Previous[Previous_Clusters[k[0],k[1],k[2]]-1]=S
                        Max_Cluster_Previous_Label[Previous_Clusters[k[0],k[1],k[2]]-1]=Adjusted_Clusters_Largest[k[0],k[1],k[2]]
        else:
             Color=np.zeros(number_clusters)

                
        #We define a colorscale so that the biggest cluster is blue, the second largest is red, the third is green, the fourth is purple, the fifth is yellow, the sixth is cyan, and all the others are assigned random colors among 100 possible ones.
        #We add a little bit of variation in the colors for better visualisation.     
        length=8
        colorscale=[]
        for i in np.arange(0,length):
            colorscale.append([(2*i)/(25*length), 'rgb(0, 0, 200)'])
            colorscale.append([(2*i+1)/(25*length), 'rgb(77, 77, 255)'])
        for i in np.arange(0,length):
            colorscale.append([(2*length+2*i)/(25*length), 'rgb(255, 26, 26)'])
            colorscale.append([(2*length+2*i+1)/(25*length), 'rgb(180, 0, 0)'])
        for i in np.arange(0,length):
            colorscale.append([(4*length+2*i)/(25*length), 'rgb(43, 170, 36)'])
            colorscale.append([(4*length+2*i+1)/(25*length), 'rgb(52, 204, 44)'])
        for i in np.arange(0,length):
            colorscale.append([(6*length+2*i)/(25*length), 'rgb(75, 6, 156)'])
            colorscale.append([(6*length+2*i+1)/(25*length), 'rgb(101, 9, 209)'])
        for i in np.arange(0,length):
            colorscale.append([(8*length+2*i)/(25*length), 'rgb(246, 179, 6)'])
            colorscale.append([(8*length+2*i+1)/(25*length), 'rgb(211, 142, 13)'])
        for i in np.arange(0,length):
            colorscale.append([(10*length+2*i)/(25*length), 'rgb(9, 178, 201)'])
            colorscale.append([(10*length+2*i+1)/(25*length), 'rgb(10, 219, 247)'])


        number_color=50
        length2=10
        for k in np.arange(0,number_color):
            for i in np.arange(1,length2+1):
                color1=hsv_to_rgb(k/(number_color),0.88,0.9)
                color2=hsv_to_rgb(k/(number_color),0.88,0.79)
                colorscale.append([0.5+(2*k*length2+2*i-1)/(8*length2*number_color), 'rgb'+str(color1)])
                colorscale.append([0.5+(2*k*length2+(2*i))/(8*length2*number_color), 'rgb'+str(color2)])
        for k in np.arange(0,number_color):
            for i in np.arange(1,length2+1):
                color1=hls_to_rgb(k/(number_color),0.46,0.8)
                color2=hls_to_rgb(k/(number_color),0.58,0.8)
                colorscale.append([0.75+(2*k*length2+2*i-1)/(8*length2*number_color), 'rgb'+str(color1)])
                colorscale.append([0.75+(2*k*length2+2*i)/(8*length2*number_color), 'rgb'+str(color2)])
                
        
        # We now assign colors to the clusters of Perco which are not among the biggest ones recorded in Max_Cluster_Previous_Label.
        # We first check which of the 6 first colors were not used at the previous iteration when percent>0; when percent<0 we simply assign the first colors in decreasing order of cluster size.
        if percent>0:
            Free_Colors=np.sort(np.setdiff1d(np.arange(0.06,0.06+6*0.08,0.08),Previous_color[Previous_color<0.5]))
            Free_Colors_Iter=iter(Free_Colors)
            for l in np.arange(number_clusters):
                if l+1 not in Max_Cluster_Previous_Label:
                    #We first assign specific nice colors to the largest clusters, among the colors which did not appear in the previous frame.
                    if len(Free_Colors)!=0 and k!=Free_Colors[-1]:
                        k=next(Free_Colors_Iter)
                        Color[l]=k
                    #We then assign a new color (between 0.5 and 1) to any other cluster.
                    else:
                        Color[l]=0.5+(np.random.randint(1,2*N**2)*length)/(4*length*N**2)
        else:
            for l in np.arange(number_clusters):
                if l<7:
                    Color[l]=0.06+l*0.08
                else:    
                    Color[l]=0.5+(np.random.randint(1,2*N**2)*length)/(4*length*N**2)

                    
        if HTML==1:
            #If HTML=1 we count all points as visible
            visible_points=np.argwhere(Adjusted_Clusters_Largest != 0)
        else:
            #Otherwise we remove all points hidden from the viewpoint (N,N,N) using the function all_visible_points. The projected grid cell size (here 0.1) should roughly match the marker size, and can easily be adapted for different markers.
            visible_points = all_visible_points(Adjusted_Clusters_Largest, N, 0.1)
            
            

        #We now define three vectors Largestx, Largesty and Largestz containing the coordinates of the points of the components.
        #We further assign each point with its new color in Largestc, modifying it slightly for better visualisation. We also put the cluster size in hovertext in Largesth.
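        #Two dummy points with color values 0 and 1 are listed first (at coordinates 2N and 2N+1, outside the plotted box), which pins the colorscale to its full [0,1] range so that cluster colors remain comparable between frames.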
        Largestx=[2*N,2*N+1]
        Largesty=[2*N,2*N+1]
        Largestz=[2*N,2*N+1]
        if visible_points.any():
            Largestx=Largestx+list(visible_points[:, 0])
            Largesty=Largesty+list(visible_points[:, 1])
            Largestz=Largestz+list(visible_points[:, 2])
        Largestc=[0,1]
        Largesth=[0,0]
        for i in visible_points:
            l=int(Adjusted_Clusters_Largest[i[0],i[1],i[2]])-1
            if HTML==1:
                if Color[l]>0.5:
                    Largestc.append(Color[l]-min([i[0],i[1],i[2],N-i[0],N-i[1],N-i[2]])/(4*N*number_color))
                else:
                    Largestc.append(Color[l]-min([i[0],i[1],i[2],N-i[0],N-i[1],N-i[2]])*2/(50*N))
            else:
                if Color[l]>0.5:
                    Largestc.append(Color[l]-min([N-i[0],N-i[1],N-i[2]])/(4*N*number_color))
                else:
                    Largestc.append(Color[l]-min([N-i[0],N-i[1],N-i[2]])*2/(50*N))
            Largesth.append(['Number of points='+str(int(Cluster_Sizes_Order[-l-1])),Color[l]])


        
        
        #We can finally generate the figure 
        scatter=go.Scatter3d(x=Largestx, y=Largesty,z=Largestz,mode='markers'
                                        ,marker_color=Largestc, marker_colorscale=colorscale,hovertext=Largesth)
        fig=go.Figure(data=[scatter])
        
        
        fig.update(layout_coloraxis_showscale=False)
        fig.update_traces(showlegend=False) 
        fig.update_layout(scene = dict(
            xaxis_title='',
            yaxis_title='',
            zaxis_title='',
            xaxis_range=[-2,N+2],
            yaxis_range=[-2,N+2],
            zaxis_range=[-2,N+2],
            xaxis=dict(showticklabels=False),
            yaxis=dict(showticklabels=False),
            zaxis=dict(showticklabels=False),
            )) 
        fig.update_layout(scene=dict(camera=dict(eye=dict(x=1.25, y=1.25, z=1.25)), 
                                                                                             xaxis=dict(backgroundcolor='rgba(0, 0, 0, 0.14)'),
                                                                                             yaxis=dict(backgroundcolor='rgba(0, 0, 0, 0.14)'),
                                                                                             zaxis=dict(backgroundcolor='rgba(0, 0, 0, 0.14)'),
                                                                                             aspectmode='cube', 
                                                                                             aspectratio=dict(x=1, y=1, z=1)
                                                                                             ))

        # We use square symbols, with a size of 400/N. However for small or large N these squares can appear too big or too small, and need to be adjusted manually. 
        fig.update_traces(
            marker=dict(size=400/N, symbol="square"),
            selector=dict(mode="markers"),)
        
        

        return [fig,scatter,Color,Adjusted_Clusters_Largest]
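

#The bond percolation encoding described in Clusters_Figure above can be built from three arrays of open edges, one per direction.
#A minimal sketch (hypothetical helper, not used elsewhere), assuming open_x[i,j,k]=1 means that the edge between (i,j,k) and (i+1,j,k) is open (and similarly for open_y and open_z):
def bond_encoding(open_x, open_y, open_z):
    N = open_x.shape[0]
    Perco = np.zeros((2*N, 2*N, 2*N), int)
    Perco[1::2, ::2, ::2] = open_x   #the edge between x and x+e_1 sits at (2i+1, 2j, 2k)
    Perco[::2, 1::2, ::2] = open_y   #the edge between x and x+e_2 sits at (2i, 2j+1, 2k)
    Perco[::2, ::2, 1::2] = open_z   #the edge between x and x+e_3 sits at (2i, 2j, 2k+1)
    return Perco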
    
    
    
def Percolation_Image(N,Perco,h,percent,boundary,site,name,paraname,HTML):
    #This function generates a single image of the clusters in Perco, both as png and html
    #Only the clusters with volume larger than (percent)/100 times the volume of the largest cluster are displayed
    #h is the parameter of the percolation model defining Perco
    #If percent is negative, we display only the (-percent) largest clusters instead.
    #If boundary=0 then the clusters have periodic boundary condition, and otherwise we use free boundary condition.
    #If site=1, then Perco is a site percolation model, otherwise it is a bond percolation model
    #For site percolation models, Perco is simply a list of size (N,N,N) representing the open vertices
    #For bond percolation models, Perco is a list of size (2N,2N,2N), such that for x=(i,j,k), the edge between x and x+e_1 is open if and only if field at (2*i+1,2*j,2*k) is equal to 1. Similarly for the other directions. 
    #Name is a name for the percolation model used in the saved image, paraname is the name for the percolation parameter
    #Put HTML=1 if you want to use the html file, otherwise put HTML=0 for better performance.
    
    print('Starting Image Simulation for N='+str(N)+' and '+str(paraname)+'='+str(np.around(h,5)))

    #We take the default initial values for the color scheme, since there will be no iteration. 0.06 is blue in my convention, and this corresponds to the color of the supercritical giant cluster. 
    Previous_color=np.array([0.06])
    Previous_Clusters=np.ones((N,N,N),int)
    


    #We plot an image of the clusters in Perco 
    fig=Clusters_Figure(N, Perco, Previous_color,Previous_Clusters,percent,boundary,site,HTML)[0]

    fig.update_layout(
        margin = {'l':25,'r':25,'t':0,'b':70},
        )
    
    
    fig.update_traces(
            marker=dict(size=220/N, symbol="square"),
            selector=dict(mode="markers"),)
    
    #We go to the correct folder
    if not os.path.exists(str(name)):
        os.mkdir(str(name))
    os.chdir(str(name))

    if percent>0:
        percent_name=str(percent)+"_Percent_Largest"
    else:
        percent_name=str(-percent)+"_Largest"
    if boundary==0:
        boundary_name="Perio_Boundary"
    else:
        boundary_name="Free_Boundary"

    if not os.path.exists(str(name)+"_"+str(boundary_name)+"_"+str(percent_name)):
        os.mkdir(str(name)+"_"+str(boundary_name)+"_"+str(percent_name))
    os.chdir(str(name)+"_"+str(boundary_name)+"_"+str(percent_name))

    #We give a name to our image which avoids deleting previously simulated images, and then create it
    counter=1
    while os.path.isfile(str(name)+"_N="+str(N)+"_"+str(paraname)+"="+str(np.around(h,5))+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+".png") or os.path.isfile(str(name)+"_N="+str(N)+"_"+str(paraname)+"="+str(np.around(h,5))+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+".html"):
        counter += 1  
        
    #fig.show()
    fig.write_html(str(name)+"_N="+str(N)+"_"+str(paraname)+"="+str(np.around(h,5))+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+".html")
    pio.write_image(fig, str(name)+"_N="+str(N)+"_"+str(paraname)+"="+str(np.around(h,5))+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+".png",scale=5, width=1080, height=1080, engine='kaleido')

    os.chdir("..")
    os.chdir("..")
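
#Illustrative driver for the function above (hypothetical parameters, not called by default):
#Bernoulli site percolation at parameter p on the N^3 box, keeping clusters larger than 5 percent of the largest one,
#with periodic boundary conditions and an html output.
def _example_image(N=40, p=0.35, seed=0):
    rng = np.random.default_rng(seed)
    Perco = (rng.random((N, N, N)) < p).astype(int)
    Percolation_Image(N, Perco, p, 5, 0, 1, 'Bernoulli_Site', 'p', 1)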




def Percolation_Video_Frames(N,Perco,Previous_color,Previous_Clusters,h,hrange,percent,boundary,site,name,paraname):
    #This function generates one frame of the clusters in Perco, which is a percolation model at level h
    #These frames can then be combined into a video spanning all the parameter values in hrange
    #Only the clusters with volume larger than (percent)/100 times the volume of the largest cluster are displayed
    #If percent is negative, we display only the (-percent) largest clusters instead.
    #If boundary=0 then the clusters have periodic boundary condition, and otherwise we use free boundary condition.
    #If site=1, then Perco is a site percolation model, otherwise it is a bond percolation model
    #For site percolation models, Perco is simply a list of size (N,N,N) representing the open vertices
    #For bond percolation models, Perco is a list of size (2N,2N,2N), such that for x=(i,j,k), the edge between x and x+e_1 is open if and only if field at (2*i+1,2*j,2*k) is equal to 1. Similarly for the other directions. 
    #Name is a name for the percolation model used in the saved images and video, paraname is the name for the percolation parameter

    krange=np.arange(len(hrange))
    kcurrent=np.argwhere(hrange==h)[0,0]
    hrange=np.around(hrange,5)

    
    #We define a slider that will be plotted at the bottom.
    #The first slider only displays a reduced set of ticks, otherwise the labels become messy: we only put ticks at multiples of 0.1 (or 0.01 for small parameter ranges). This can be changed depending on the range of parameters chosen. 
    #We add two ticks with invisible labels to make sure the extremities of the slider are correct        
    steps=[]
    if hrange[-1]-hrange[0]>0.5:
        hrange_slider=np.arange(floor(hrange[0]*10)/10,ceil(hrange[-1]*10)/10+0.05,0.1)
        for i in hrange_slider:
            step = dict(
                method="update",
                args=[{"visible": [False] * len(hrange_slider)}],  # layout attribute
                label=round(i,1))
            steps.append(step)
    else:
        hrange_slider=np.arange(floor(hrange[0]*100)/100,ceil(hrange[-1]*100)/100+0.005,0.01)
        for i in hrange_slider:
            step = dict(
                method="update",
                args=[{"visible": [False] * len(hrange_slider)}],  # layout attribute
                label=round(i,2))
            steps.append(step)


    
    #The second slider is there to see the current value of h 
    steps2=[]
    if hrange[-1]-hrange[0]>0.5:
        step_slider=0.0001 
        hrange_slider2=np.arange(floor(hrange[0]*10)/10,ceil(hrange[-1]*10)/10+step_slider/2,step_slider)
    else:
        step_slider=0.00001 
        hrange_slider2=np.arange(floor(hrange[0]*100)/100,ceil(hrange[-1]*100)/100+step_slider/2,step_slider)
    for i in hrange_slider2:
        step = dict(
            method="update",
            args=[{"visible": [False] * len(krange)}],  # layout attribute
            label='')
        steps2.append(step)
    
                
            
    sliders = [dict(
    active=0,
    bgcolor='white',
    bordercolor='white',
    currentvalue={"prefix": str(paraname)+"=","visible":True,"font":{'color':'white'}},
    pad={"t": len(steps)},
    font = {'size': 27},
    len=1,
    minorticklen=0,
    xanchor='left',
    steps=steps,
    ),
    dict(
    active=0,
    currentvalue={"prefix": str(paraname)+"=","visible":True},
    pad={"t": len(steps2)},
    font = {'size': 27},
    len=1,
    minorticklen=0,
    ticklen=0,
    xanchor='left',
    steps=steps2,
    )]


    #We now create a figure of the clusters in Perco, and update the values of the Previous_color and Previous_Clusters for the next iteration.
    fig,scatter,Previous_color,Previous_Clusters=Clusters_Figure(N, Perco, Previous_color,Previous_Clusters,percent,boundary,site,0)

    #We add an annotation with the current value of h, since the slider labels above were hidden to avoid displaying additional ticks. 
    fig.add_annotation(x=-0.002, y=-0.045, showarrow=False, text=str(paraname)+"="+str(h), font=dict(size=27))
    
    
    #We add the slider to visualise the value of h.
    fig.layout.update(sliders=sliders)
    fig.layout.sliders[1].update(active=np.argwhere(np.around(hrange_slider2,4)==round(h,4))[0,0])
    fig.update_layout(updatemenus=[dict(type='buttons',
              showactive=False,
              y=-0.10,
              x=-0.05,
              xanchor='left',
              yanchor='bottom')
                        ])

    # position the slider (closer to the plot)
    fig['layout']['sliders'][0]['pad']=dict(r= 0, t= 0.0)
    fig['layout']['sliders'][1]['pad']=dict(r= 0, t= 0.0)

        
    fig.update_layout(
    margin = {'l':25,'r':25,'t':0,'b':130},
            )
    

    #We are now ready to save the corresponding frame:
    #fig.show()
    pio.write_image(fig, f'{name}_N={N}_{len(hrange)-kcurrent:03d}.png',scale=1, width=1080, height=1080, engine='kaleido')
    return [Previous_color,Previous_Clusters]


def Percolation_Video(N,Percos,hrange,percent,boundary,site,name,paraname):
    #This function generates a video file for the clusters in Percos, using the Percolation_Video_Frames function above
    #Percos>=h corresponds to the clusters for the parameter h. hrange is the list of parameters h used to generate Percos.
    #Only the clusters with volume larger than (percent)/100 times the volume of the largest cluster are displayed
    #If percent is negative, we display only the (-percent) largest clusters instead.
    #If boundary=0 then the clusters have periodic boundary condition, and otherwise we use free boundary condition.
    #If site=1, then Perco is a site percolation model, otherwise it is a bond percolation model
    #For site percolation models, Perco is simply a list of size (N,N,N) representing the open vertices
    #For bond percolation models, Perco is a list of size (2N,2N,2N), such that for x=(i,j,k), the edge between x and x+e_1 is open if and only if field at (2*i+1,2*j,2*k) is equal to 1. Similarly for the other directions. 
    #Name is a name for the percolation model used in the saved html file, paraname is the name for the percolation parameter

    
    print('Starting Video Simulation for N='+str(N))

    #We create a folder for the frames of the video 
    if not os.path.exists(str(name)):
        os.mkdir(str(name))
    os.chdir(str(name))
    
    if percent>0:
        percent_name=str(percent)+"_Percent_Largest"
    else:
        percent_name=str(-percent)+"_Largest"
    if boundary==0:
        boundary_name="Perio_Boundary"
    else:
        boundary_name="Free_Boundary"

    if not os.path.exists(str(name)+"_"+str(boundary_name)+"_"+str(percent_name)):
        os.mkdir(str(name)+"_"+str(boundary_name)+"_"+str(percent_name))
    os.chdir(str(name)+"_"+str(boundary_name)+"_"+str(percent_name))


    counter=1
    while os.path.exists(str(name)+'_Frames'+"_N="+str(N)+"_"+str(boundary_name)+"_"+str(percent_name)+'_'+str(counter)) or os.path.isfile(str(name)+"_N="+str(N)+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+'.mp4'):
        counter += 1   
    os.mkdir(str(name)+'_Frames'+"_N="+str(N)+"_"+str(boundary_name)+"_"+str(percent_name)+'_'+str(counter))
    os.chdir(str(name)+'_Frames'+"_N="+str(N)+"_"+str(boundary_name)+"_"+str(percent_name)+'_'+str(counter))

    
    hrange=np.around(hrange,8)
    krange=np.arange(len(hrange))
    
    
    #We initialize the color scheme. 0.06 is blue in my convention, and this corresponds to the color of the supercritical giant cluster. 
    Previous_color=np.array([0.06])
    Previous_Clusters=np.ones((N,N,N),int)
    


    #We first plot an image of the middle frame of hrange, which usually corresponds to criticality, as well as one slightly to its left, one slightly to its right, and one at the last value of hrange. The goal is to have a snapshot of what the video will look like, since simulating the whole video can take a long time. 
    h=hrange[len(hrange)//2]
    Percolation_Video_Frames(N,Percos>=h,Previous_color,Previous_Clusters,h,hrange,percent,boundary,site,name,paraname)

    h=hrange[(len(hrange)//2)+(len(hrange)//10)]
    Percolation_Video_Frames(N,Percos>=h,Previous_color,Previous_Clusters,h,hrange,percent,boundary,site,name,paraname)
    
    h=hrange[(len(hrange)//2)-(len(hrange)//10)]
    Percolation_Video_Frames(N,Percos>=h,Previous_color,Previous_Clusters,h,hrange,percent,boundary,site,name,paraname)

    h=hrange[len(hrange)-1]
    Percolation_Video_Frames(N,Percos>=h,Previous_color,Previous_Clusters,h,hrange,percent,boundary,site,name,paraname)

    #We now generate the frames of the video 
    print('Plotting frames for '+str(paraname)+'=')
    for kcurrent in krange:
        
        h=hrange[kcurrent]
        print(h)
        [Previous_color,Previous_Clusters]=Percolation_Video_Frames(N,Percos>=h,Previous_color,Previous_Clusters,h,hrange,percent,boundary,site,name,paraname)
        #We record the colors and clusters at each step to make sure they stay consistent from one step to the next

    # We adjust the frame rate to get a 40-second video (can easily be changed).
    num_frames = len(hrange)
    frame_rate = num_frames / 40  
    
    
    # Create video from frames using ffmpeg
    os.chdir("..")
    video_path = str(name)+"_N="+str(N)+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+'.mp4'
    os.system(f'ffmpeg -y -framerate {frame_rate} -i {name}_Frames_N={N}_{boundary_name}_{percent_name}_{counter}/{name}_N={N}_%03d.png -vcodec libx264 -r 30 -pix_fmt yuv420p -s 1080x1080 -t 42 {video_path}> NUL 2>&1')
    os.chdir("..")
    os.chdir("..")
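
#Illustrative driver for the function above (hypothetical parameters, not called by default):
#an i.i.d. uniform field, so that {Percos >= h} is Bernoulli site percolation with parameter 1-h.
#hrange is increasing, so the probability of being open decreases from one frame to the next, as assumed in Clusters_Figure.
def _example_video(N=25, seed=0):
    rng = np.random.default_rng(seed)
    Percos = rng.random((N, N, N))
    hrange = np.linspace(0.66, 0.72, 31)
    Percolation_Video(N, Percos, hrange, 5, 0, 1, 'Uniform_Field', 'h')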




def Percolation_Slide(N,Percos,hrange,percent,boundary,site,name,paraname):
    #This function generates an html file for the clusters in Percos, with a slider to change the parameter.
    #Percos>=h corresponds to the clusters for the parameter h. hrange is the list of parameters h used to generate Percos.
    #Only the clusters with volume larger than (percent)/100 times the volume of the largest cluster are displayed
    #If percent is negative, we display only the (-percent) largest clusters instead.
    #If boundary=0 then the clusters have periodic boundary condition, and otherwise we use free boundary condition.
    #If site=1, then Perco is a site percolation model, otherwise it is a bond percolation model
    #For site percolation models, Perco is simply a list of size (N,N,N) representing the open vertices
    #For bond percolation models, Perco is a list of size (2N,2N,2N), such that for x=(i,j,k), the edge between x and x+e_1 is open if and only if field at (2*i+1,2*j,2*k) is equal to 1. Similarly for the other directions. 
    #Name is a name for the percolation model used in the saved html file, paraname is the name for the percolation parameter

    print('Starting Slider Simulation for N='+str(N))
    krange=np.arange(len(hrange))

        
    #We initialize the color scheme. 0.06 is blue in my convention, and this corresponds to the color of the supercritical giant cluster. 
    Previous_color=np.array([0.06])
    Previous_Clusters=np.ones((N,N,N),int)
    
    #We define a slider that will be plotted at the bottom
    steps=[]
    for i in krange:
        step = dict(
            method="update",
            args=[{"visible": [False] * len(krange)}],  # layout attribute
            label=round(hrange[i],4))
        step["args"][0]["visible"][i] = True  # Toggle i'th trace to "visible"
        steps.append(step)
        
    sliders = [dict(
    active=0,
    currentvalue={"prefix": str(paraname)+"="
        },
    pad={"t": len(krange)},
    font = {'size': 27},
    len=1,
    minorticklen=0,
    xanchor='left',
    steps=steps
    )]


    
    #We retrieve the scatter of all points and add them to a figure.
    fig = go.Figure()
    print('Plotting percolation clusters at level '+str(paraname)+'=')

    for kcurrent in krange:
        h=np.around(hrange[kcurrent],4)
        print(h)        
        fig2,scatter,Previous_color,Previous_Clusters=Clusters_Figure(N, Percos>=h, Previous_color,Previous_Clusters,percent,boundary,site,1)
        fig.add_trace(scatter)


    #We adapt the parameters of the figure 
    fig.data[1].visible = True
            
    fig.layout.update(sliders=sliders)
    fig.update(layout_coloraxis_showscale=False)
    fig.update_traces(showlegend=False) 
    fig.update_layout(scene = dict(
        xaxis_title='',
        yaxis_title='',
        zaxis_title='',
        xaxis_range=[-2,N+2],
        yaxis_range=[-2,N+2],
        zaxis_range=[-2,N+2],
        xaxis=dict(showticklabels=False),
        yaxis=dict(showticklabels=False),
        zaxis=dict(showticklabels=False),
        )) 
    fig.update_layout(scene=dict(camera=dict(eye=dict(x=1.25, y=1.25, z=1.25)), 
           xaxis=dict(backgroundcolor='rgba(0, 0, 0, 0.14)'),
           yaxis=dict(backgroundcolor='rgba(0, 0, 0, 0.14)'),
           zaxis=dict(backgroundcolor='rgba(0, 0, 0, 0.14)'),
           aspectmode='cube', 
           aspectratio=dict(x=1, y=1, z=1)
           ))

    fig.update_traces(
    marker=dict(size=210/N, symbol="square"),
    selector=dict(mode="markers"),)
    
    # position the slider (closer to the plot)
    fig['layout']['sliders'][0]['pad']=dict(r= 0, t= 0.0)

        
    fig.update_layout(
        margin = {'l':25,'r':25,'t':0,'b':130},
        )
    
    #We go to the correct folder    
    if not os.path.exists(str(name)):
        os.mkdir(str(name))
    os.chdir(str(name))

    if percent>0:
        percent_name=str(percent)+"_Percent_Largest"
    else:
        percent_name=str(-percent)+"_Largest"
    if boundary==0:
        boundary_name="Perio_Boundary"
    else:
        boundary_name="Free_Boundary"

    if not os.path.exists(str(name)+"_"+str(boundary_name)+"_"+str(percent_name)):
        os.mkdir(str(name)+"_"+str(boundary_name)+"_"+str(percent_name))
    os.chdir(str(name)+"_"+str(boundary_name)+"_"+str(percent_name))

    #We give a name to our html file which avoids deleting previously simulated files, and then create it
    counter=1
    while os.path.isfile(str(name)+"_N="+str(N)+"_"+str(paraname)+"="+str(np.around(hrange,5))+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+".html"):
        counter += 1   
        
    #fig.show()
    fig.write_html(str(name)+"_N="+str(N)+"_"+str(paraname)+"="+str(np.around(hrange,5))+"_"+str(boundary_name)+"_"+str(percent_name)+"_"+str(counter)+".html")
    os.chdir("..")
    os.chdir("..")
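
#Illustrative driver for the function above (hypothetical parameters, not called by default):
#the same kind of i.i.d. uniform field as in the video example, now rendered as a single html file with a slider over h.
def _example_slide(N=20, seed=0):
    rng = np.random.default_rng(seed)
    Percos = rng.random((N, N, N))
    hrange = np.linspace(0.66, 0.72, 11)
    Percolation_Slide(N, Percos, hrange, -5, 0, 1, 'Uniform_Field', 'h')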
