A Survey on Offline Reinforcement Learning: Taxonomy, Review, and Open Problems Prudencio, Rafael Figueiredo; Maximo, Marcos R. O. A.; Colombini, Esther Luna In: IEEE Transactions on Neural Networks and Learning Systems, pp. 1-0, 2023. @article{10078377,
title = {A Survey on Offline Reinforcement Learning: Taxonomy, Review, and Open Problems},
author = {Rafael Figueiredo Prudencio and Marcos R. O. A. Maximo and Esther Luna Colombini},
doi = {10.1109/TNNLS.2023.3250269},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
pages = {1-0},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Devices, I Choose You: Adaptive Client Selection for Effective Communication in Federated Learning (Best Paper) Souza, Allan; Bittencourt, Luiz; Cerqueira, Eduardo; Loureiro, Antonio; Villas, Leandro In: Proceedings of the XLI Brazilian Symposium on Computer Networks and Distributed Systems, pp. 1–14, SBC, Brasília/DF, 2023, ISSN: 2177-9384. @inproceedings{sbrc,
title = {Devices, I Choose You: Adaptive Client Selection for Effective Communication in Federated Learning (Best Paper)},
author = {Allan Souza and Luiz Bittencourt and Eduardo Cerqueira and Antonio Loureiro and Leandro Villas},
doi = {10.5753/sbrc.2023.499},
issn = {2177-9384},
year = {2023},
date = {2023-01-01},
booktitle = {Proceedings of the XLI Brazilian Symposium on Computer Networks and Distributed Systems},
pages = {1–14},
publisher = {SBC},
address = {Brasília/DF},
abstract = {Federated Learning (FL) is a distributed approach to collaboratively training machine learning models. FL requires a high level of communication between the devices and a central server, thus creating several challenges, including communication bottlenecks and network scalability. In this work, we introduce DEEV, a solution to decrease the overall communication and computation costs of training a model in the FL environment. DEEV employs a client selection strategy that dynamically adapts the number of devices training the model and the number of rounds required to achieve convergence. A use case on the human activity recognition dataset is performed to evaluate DEEV and compare it to other state-of-the-art approaches. Experimental evaluations show that DEEV efficiently reduces the overall communication and computation overhead of training a model and promotes its convergence. In particular, DEEV reduces communication overhead by up to 60% and computation overhead by up to 90% compared to literature approaches, while providing good convergence even in scenarios where data is non-independently and identically distributed among client devices.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
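The dynamic selection strategy this abstract describes can be illustrated with a short sketch: pick the clients whose accuracy lags the population average and shrink the pool over rounds. This is a minimal, hypothetical rendering, not the authors' implementation; the function names and the decay schedule are assumptions.

```python
import random

def select_clients(client_accuracies, round_num, decay=0.1):
    """Pick the clients whose accuracy lags the population mean.

    client_accuracies: dict mapping client_id -> last reported accuracy.
    An exponential decay shrinks the selected set over rounds, cutting
    communication as the global model converges (hypothetical schedule).
    """
    mean_acc = sum(client_accuracies.values()) / len(client_accuracies)
    # Below-average clients benefit most from another training round.
    lagging = [cid for cid, acc in client_accuracies.items() if acc <= mean_acc]
    # Keep a decaying fraction of the lagging clients.
    keep = max(1, int(len(lagging) * (1 - decay) ** round_num))
    return random.sample(lagging, keep)

# Example: 10 simulated clients at round 3.
accs = {f"client-{i}": random.uniform(0.4, 0.9) for i in range(10)}
print(select_clients(accs, round_num=3))
```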
Compressed Client Selection for Efficient Communication in Federated Learning Mohamed, Aissa Hadj; Assumpção, Nícolas R. G.; Astudillo, Carlos A.; Souza, Allan M.; Bittencourt, Luiz F.; Villas, Leandro A. In: 2023 IEEE 20th Consumer Communications & Networking Conference (CCNC), pp. 508-516, 2023, ISSN: 2331-9860. @inproceedings{10059659,
title = {Compressed Client Selection for Efficient Communication in Federated Learning},
author = {Aissa Hadj Mohamed and Nícolas R. G. Assumpção and Carlos A. Astudillo and Allan M. Souza and Luiz F. Bittencourt and Leandro A. Villas},
doi = {10.1109/CCNC51644.2023.10059659},
issn = {2331-9860},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 20th Consumer Communications & Networking Conference (CCNC)},
pages = {508-516},
abstract = {Federated learning (FL) is a distributed approach that enables collaborative training of a shared machine learning (ML) model for a given task. FL requires bandwidth-demanding communication between devices and a central server, which causes many issues, such as communication bottlenecks and poor network scalability. Therefore, we introduce the CCS (Compressed Client Selection) algorithm, aimed at decreasing the overall communication costs of fitting a model in the FL environment. CCS employs a biased client selection strategy that reduces the number of devices training the ML model and the number of rounds required to reach convergence. In addition, the Count Sketch compression method is implemented to reduce the overhead in client-to-server communication. A use case on the Human Activity Recognition dataset is performed to evaluate CCS and compare it with other state-of-the-art approaches. Experimental evaluations show that CCS efficiently reduces the overall communication overhead of fitting a model and promotes its convergence in an FL environment. In particular, CCS reduces communication overhead by up to 90% compared to literature approaches while providing good convergence even in scenarios where the data are non-independently and identically distributed among client devices.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
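Count Sketch, the compression method the abstract names, hashes each coordinate of an update into a small table of counters with random signs and recovers estimates by taking a median across rows. The minimal sketch below illustrates the data structure; the class layout and parameters are assumptions for illustration, not the paper's implementation.

```python
import numpy as np

class CountSketch:
    """Minimal Count Sketch for compressing a gradient vector.

    A (rows x cols) table of counters stands in for a dim-sized vector;
    each coordinate hashes to one bucket per row with a random +/-1 sign,
    and decompression takes the median estimate across rows.
    """
    def __init__(self, rows, cols, dim, seed=0):
        rng = np.random.default_rng(seed)
        self.buckets = rng.integers(0, cols, size=(rows, dim))
        self.signs = rng.choice([-1.0, 1.0], size=(rows, dim))
        self.table = np.zeros((rows, cols))

    def compress(self, vec):
        self.table[:] = 0.0
        for r in range(self.table.shape[0]):
            np.add.at(self.table[r], self.buckets[r], self.signs[r] * vec)
        return self.table  # rows * cols floats instead of dim floats

    def decompress(self):
        row_idx = np.arange(self.table.shape[0])[:, None]
        estimates = self.signs * self.table[row_idx, self.buckets]
        return np.median(estimates, axis=0)

# A 1000-dimensional update compressed into 5 x 50 = 250 counters.
grad = np.random.default_rng(1).standard_normal(1000)
cs = CountSketch(rows=5, cols=50, dim=1000)
cs.compress(grad)
approx = cs.decompress()
```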
NeuralMatch: Identifying Model-Based Customer Similarity in Federated Learning Talasso, Gabriel; Souza, Allan; Villas, Leandro In: Extended Proceedings of the XLI Brazilian Symposium on Computer Networks and Distributed Systems, pp. 176–183, SBC, Brasília/DF, 2023, ISSN: 2177-9384. @inproceedings{sbrc_extended,
title = {NeuralMatch: Identifying Model-Based Customer Similarity in Federated Learning},
author = {Gabriel Talasso and Allan Souza and Leandro Villas},
doi = {10.5753/sbrc_extended.2023.808},
issn = {2177-9384},
year = {2023},
date = {2023-01-01},
booktitle = {Extended Proceedings of the XLI Brazilian Symposium on Computer Networks and Distributed Systems},
pages = {176–183},
publisher = {SBC},
address = {Brasília/DF},
abstract = {Federated learning is a distributed machine learning technique that allows multiple devices to collaborate on training a common model while preserving user data privacy. However, federated learning faces challenges with non-identically distributed and unbalanced data, which can result in less accurate models. To address this, we propose NeuralMatch, a framework that identifies similarity between clients' models in federated learning, making it possible to measure client similarity without sharing data. The proposed framework can help in developing more efficient federated learning solutions that deal with non-identically distributed and unbalanced data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
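The core idea, measuring client similarity from model parameters rather than raw data, can be sketched in a few lines. This is one plausible realization (cosine similarity over flattened weights); the abstract does not specify the exact metric, so treat the functions below as illustrative assumptions.

```python
import numpy as np

def flatten_params(params):
    """Concatenate a model's weight arrays into a single vector."""
    return np.concatenate([p.ravel() for p in params])

def model_similarity(params_a, params_b):
    """Cosine similarity between two clients' model parameters.

    Comparing models instead of raw data lets the server group clients
    with similar data distributions without ever seeing their data.
    """
    a, b = flatten_params(params_a), flatten_params(params_b)
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

# Two toy 'models', each a list of weight arrays.
m1 = [np.random.randn(4, 4), np.random.randn(4)]
m2 = [w + 0.01 * np.random.randn(*w.shape) for w in m1]
print(model_similarity(m1, m2))  # close to 1.0 for similar clients
```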
FedPredict: Combining Global and Local Parameters in the Prediction Step of Federated Learning Capanema, Cláudio G. S.; Souza, Allan M.; Silva, Fabrício A.; Villas, Leandro A.; Loureiro, Antonio A. F. In: IEEE 19th International Conference on Distributed Computing in Smart Systems and Internet of Things (DCOSS), IEEE, Paphos/Cyprus, 2023. @inproceedings{fed_predict,
title = {FedPredict: Combining Global and Local Parameters in the Prediction Step of Federated Learning},
author = {Cláudio G. S. Capanema and Allan M. Souza and Fabrício A. Silva and Leandro A. Villas and Antonio A. F. Loureiro},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {IEEE 19th International Conference on Distributed Computing in Smart Systems and Internet of Things (DCOSS)},
publisher = {IEEE},
address = {Paphos/Cyprus},
abstract = {In traditional Federated Learning (FL), such as FedAvg, the main objective is to compute a generalized model applied to all clients. This approach is not effective in the non-IID scenario, where each client has a specific data distribution. As an alternative, personalized FL has proven to be an important research direction for dealing with clients' particularities. However, part of these solutions must be reexamined when a new client (i.e., one trained only a few times or never trained) is added to the FL process. To address these problems, we propose FedPredict, a simple but effective federated learning approach that combines global and local (i.e., personalized) model parameters of neural networks, considering their evolution and update levels. This combination is essential because our method is a plugin that operates in the prediction/inference step on the FL client side, which means that there is no modification to the learning process, and it can be coupled with other techniques. Compared to state-of-the-art solutions, FedPredict converges faster while achieving greater accuracy in various scenarios, including when new clients are added.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
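The combination step the abstract describes, blending global and local parameters at inference according to how far the client's training has progressed, might look like the sketch below. The linear weighting is an assumption for illustration; the paper's actual rule considers the models' evolution and update levels.

```python
def combine_parameters(local_params, global_params,
                       rounds_trained, total_rounds):
    """Blend local and global weights for the prediction step.

    A newly joined client (few or no training rounds) leans on the
    global model; a well-trained client leans on its personalized
    weights. This runs client-side at inference only, so the training
    loop is untouched and the method composes with other techniques.
    (Illustrative weighting, not the paper's exact formula.)
    """
    w_local = min(1.0, rounds_trained / max(1, total_rounds))
    return [w_local * l + (1.0 - w_local) * g
            for l, g in zip(local_params, global_params)]
```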
Resource Aware Client Selection for Federated Learning in IoT Scenarios Maciel, Filipe; Souza, Allan M.; Bittencourt, Luiz F.; Villas, Leandro A. In: IEEE 19th International Conference on Distributed Computing in Smart Systems and Internet of Things (DCOSS), IEEE, Paphos/Cyprus, 2023. @inproceedings{rawcs,
title = {Resource Aware Client Selection for Federated Learning in IoT Scenarios},
author = {Filipe Maciel and Allan M. Souza and Luiz F. Bittencourt and Leandro A. Villas},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {IEEE 19th International Conference on Distributed Computing in Smart Systems and Internet of Things (DCOSS)},
publisher = {IEEE},
address = {Paphos/Cyprus},
abstract = {Machine learning optimizes performance in many embedded applications. A weak point of many learning solutions is the intensive use of data and computational resources required for training the model. By default, client devices send data to a solution developer's server to perform the training process in a more computationally powerful environment. However, this approach can compromise the client's privacy, as data is transmitted to third parties for processing. Federated learning solves this problem by training the model on the client devices, thus without sharing data. The trained models are then aggregated on the server to create a generalized version that can run on every client. The federated learning protocol involves selecting which clients will participate in each training round, with selection criteria focused on maximizing the number of clients per round, controlling fairness, lowering round discards, and managing resources. However, existing selection algorithms neglect the minimization of battery consumption, which is critical in scenarios where clients have limited resources. In this paper, we propose a client selection mechanism for a federated learning protocol that considers energy, processing capacity, and network quality as the determining criteria for the decision. Compared to a state-of-the-art selection technique, our algorithm saves resources while maintaining the model's accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
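A resource-aware selection rule over the three criteria the abstract names (energy, processing capacity, network quality) can be sketched as a weighted score with a cutoff. The weights, threshold, and field names here are illustrative assumptions, not the paper's values.

```python
def score_client(battery, cpu, link_quality,
                 w_battery=0.5, w_cpu=0.3, w_net=0.2):
    """Weighted score over resource readings normalized to [0, 1]."""
    return w_battery * battery + w_cpu * cpu + w_net * link_quality

def select_clients(clients, threshold=0.5):
    """Keep only clients whose resource score clears the threshold."""
    return [cid for cid, res in clients.items()
            if score_client(**res) >= threshold]

clients = {
    "phone-1":  {"battery": 0.9, "cpu": 0.6, "link_quality": 0.8},
    "sensor-2": {"battery": 0.2, "cpu": 0.3, "link_quality": 0.9},
}
print(select_clients(clients))  # ['phone-1']
```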
FLEXE: Investigating Federated Learning in Connected Autonomous Vehicle Simulations Lobato, Wellington; Costa, Joahannes B. D. da; de Souza, Allan M.; Rosário, Denis; Sommer, Christoph; Villas, Leandro A. In: 2022 IEEE 96th Vehicular Technology Conference (VTC2022-Fall), pp. 1-5, 2022, ISSN: 2577-2465. @inproceedings{10012905,
title = {FLEXE: Investigating Federated Learning in Connected Autonomous Vehicle Simulations},
author = {Wellington Lobato and Joahannes B. D. da Costa and Allan M. de Souza and Denis Rosário and Christoph Sommer and Leandro A. Villas},
doi = {10.1109/VTC2022-Fall57202.2022.10012905},
issn = {2577-2465},
year = {2022},
date = {2022-09-01},
booktitle = {2022 IEEE 96th Vehicular Technology Conference (VTC2022-Fall)},
pages = {1-5},
abstract = {Due to the increased computational capacity of Connected and Autonomous Vehicles (CAVs) and concerns about transferring private information, it is becoming more and more appealing to store data locally and move network computing to the edge. This trend also extends to Machine Learning (ML), where Federated Learning (FL) has emerged as an attractive solution for preserving privacy. Today, when evaluating vehicular FL mechanisms for ML training, researchers often disregard the impact of CAV mobility, network topology dynamics, or communication patterns, all of which have a large impact on the final system performance. To address this, this work presents FLEXE, an open-source extension to Veins that offers researchers a simulation environment to run FL experiments in realistic scenarios. FLEXE combines the popular Veins framework with the OpenCV library. Using the example of traffic sign recognition, we demonstrate how FLEXE can support investigations of FL techniques in a vehicular environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}