<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "JATS-journalpublishing1-3.dtd">
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="1.3" xml:lang="en">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">sensors</journal-id>
      <journal-title-group>
        <journal-title>Sensors</journal-title>
        <abbrev-journal-title abbrev-type="publisher">Sensors</abbrev-journal-title>
        <abbrev-journal-title abbrev-type="pubmed">Sensors</abbrev-journal-title>
      </journal-title-group>
      <issn pub-type="epub">1424-8220</issn>
      <publisher>
        <publisher-name>MDPI</publisher-name>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="doi">10.3390/s24123980</article-id>
      <article-id pub-id-type="publisher-id">sensors-24-03980</article-id>
      <article-version vocab="JAV" vocab-identifier="https://www.mdpi.com/1424-8220/24/12/3980/pdf" vocab-term="Version of Record" article-version-type="VoR">v2</article-version>
      <article-categories>
        <subj-group>
          <subject>Article</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Energy-Efficient PPG-Based Respiratory Rate Estimation Using Spiking Neural Networks</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid" authenticated="true">https://orcid.org/0000-0003-0726-1628</contrib-id>
          <name>
            <surname>Yang</surname>
            <given-names>Geunbo</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Software" vocab-term-identifier="https://credit.niso.org/contributor-roles/software/">Software</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing – original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing – original draft</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization/">Visualization</role>
          <xref rid="af1-sensors-24-03980" ref-type="aff">1</xref>
          <xref rid="fn1-sensors-24-03980" ref-type="fn">†</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid" authenticated="true">https://orcid.org/0000-0002-0573-7007</contrib-id>
          <name>
            <surname>Kang</surname>
            <given-names>Youngshin</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation/">Validation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis/">Formal analysis</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation/">Investigation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing – original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing – original draft</role>
          <xref rid="af1-sensors-24-03980" ref-type="aff">1</xref>
          <xref rid="fn1-sensors-24-03980" ref-type="fn">†</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid" authenticated="true">https://orcid.org/0000-0003-3836-8655</contrib-id>
          <name>
            <surname>Charlton</surname>
            <given-names>Peter H.</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing – original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing – original draft</role>
          <xref rid="af2-sensors-24-03980" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid" authenticated="true">https://orcid.org/0000-0002-2868-485X</contrib-id>
          <name>
            <surname>Kyriacou</surname>
            <given-names>Panayiotis A.</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation/">Data curation</role>
          <xref rid="af3-sensors-24-03980" ref-type="aff">3</xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Kim</surname>
            <given-names>Ko Keun</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology/">Methodology</role>
          <xref rid="af4-sensors-24-03980" ref-type="aff">4</xref>
        </contrib>
        <contrib contrib-type="author" corresp="yes">
          <name>
            <surname>Li</surname>
            <given-names>Ling</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
          <xref rid="af3-sensors-24-03980" ref-type="aff">3</xref>
          <xref rid="c1-sensors-24-03980" ref-type="corresp">*</xref>
        </contrib>
        <contrib contrib-type="author" corresp="yes">
          <contrib-id contrib-id-type="orcid" authenticated="true">https://orcid.org/0000-0001-8042-007X</contrib-id>
          <name>
            <surname>Park</surname>
            <given-names>Cheolsoo</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization/">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing – original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft/">Writing – original draft</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision/">Supervision</role>
          <xref rid="af1-sensors-24-03980" ref-type="aff">1</xref>
          <xref rid="c1-sensors-24-03980" ref-type="corresp">*</xref>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Bevilacqua</surname>
            <given-names>Alessandro</given-names>
          </name>
          <role>Academic Editor</role>
        </contrib>
      </contrib-group>
      <aff id="af1-sensors-24-03980"><label>1</label>Department of Computer Engineering, Kwangwoon University, Seoul 01897, Republic of Korea; <email>rmsqhwkd2@gmail.com</email> (G.Y.); <email>ysin0414@gmail.com</email> (Y.K.)</aff>
      <aff id="af2-sensors-24-03980"><label>2</label>Department of Public Health and Primary Care, University of Cambridge, Cambridge CB1 8RN, UK; <email>pc657@medschl.cam.ac.uk</email></aff>
      <aff id="af3-sensors-24-03980"><label>3</label>Department of Engineering, School of Science and Technology (SST), City University of London, London EC1V 0HB, UK; <email>p.kyriacou@city.ac.uk</email></aff>
      <aff id="af4-sensors-24-03980"><label>4</label>AI Lab, LG Electronics, Seoul 06763, Republic of Korea; <email>kokeun.kim@lge.com</email></aff>
      <author-notes>
        <corresp id="c1-sensors-24-03980"><label>*</label>Correspondence: <email>caroline.li@city.ac.uk</email> (L.L.); <email>parkcheolsoo@kw.ac.kr</email> (C.P.)</corresp>
        <fn id="fn1-sensors-24-03980">
          <label>†</label>
          <p>These authors contributed equally to this work.</p>
        </fn>
      </author-notes>
      <pub-date pub-type="epub">
        <day>19</day>
        <month>06</month>
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="collection">
        <month>06</month>
        <year>2024</year>
      </pub-date>
      <volume>24</volume>
      <issue>12</issue>
      <elocation-id>3980</elocation-id>
      <history>
        <date date-type="received">
          <day>16</day>
          <month>05</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>12</day>
          <month>06</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>18</day>
          <month>06</month>
          <year>2024</year>
        </date>
      </history>
      <permissions>
        <copyright-statement>© 2024 by the authors.</copyright-statement>
        <copyright-year>2024</copyright-year>
        <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
          <license-p>Licensee MDPI, Basel, Switzerland. This article is an open access article distributed under the terms and conditions of the Creative Commons Attribution (CC BY) license (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>).</license-p>
        </license>
      </permissions>
      <abstract>
        <p>Respiratory rate (RR) is a vital indicator for assessing the bodily functions and health status of patients. RR is a prominent parameter in the field of biomedical signal processing and is strongly associated with other vital signs such as blood pressure, heart rate, and heart rate variability. Various physiological signals, such as photoplethysmogram (PPG) signals, are used to extract respiratory information. RR is also estimated by detecting peak patterns and cycles in the signals through signal processing and deep-learning approaches. In this study, we propose an end-to-end RR estimation approach based on a third-generation artificial neural network model—spiking neural network. The proposed model employs PPG segments as inputs, and directly converts them into sequential spike events. This design aims to reduce information loss during the conversion of the input data into spike events. In addition, we use feedback-based integrate-and-fire neurons as the activation functions, which effectively transmit temporal information. The network is evaluated using the BIDMC respiratory dataset with three different window sizes (16, 32, and 64 s). The proposed model achieves mean absolute errors of 1.37 ± 0.04, 1.23 ± 0.03, and 1.15 ± 0.07 for the 16, 32, and 64 s window sizes, respectively. Furthermore, it demonstrates superior energy efficiency compared with other deep learning models. This study demonstrates the potential of the spiking neural networks for RR monitoring, offering a novel approach for RR estimation from the PPG signal.</p>
      </abstract>
      <kwd-group>
        <kwd>spiking neural network</kwd>
        <kwd>physiological signal</kwd>
        <kwd>healthcare</kwd>
        <kwd>photoplethysmogram</kwd>
        <kwd>respiratory rate</kwd>
      </kwd-group>
      <funding-group>
        <award-group>
          <funding-source>Technology Innovation Program</funding-source>
          <funding-source>Ministry of Trade, Industry and Energy (MOTIE, Korea)</funding-source>
          <award-id>RS-2022-00154678</award-id>
        </award-group>
        <award-group>
          <funding-source>Korea Institute for Advancement of Technology (KIAT)</funding-source>
          <funding-source>Korea Government (MOTIE)</funding-source>
          <award-id>P0017124</award-id>
        </award-group>
        <award-group>
          <funding-source>MOTIE (Ministry of Trade, Industry, and Energy) in Korea</funding-source>
          <funding-source>Korea Institute for Advancement of Technology (KIAT)</funding-source>
          <award-id>P0017308</award-id>
        </award-group>
        <award-group>
          <funding-source>Kwangwoon University</funding-source>
        </award-group>
        <funding-statement>This work was supported by the Technology Innovation Program (RS-2022-00154678, Development of Intelligent Sensor Platform Technology for Connected Sensor) funded by the Ministry of Trade, Industry and Energy (MOTIE, Korea), by the Korea Institute for Advancement of Technology (KIAT) grant funded by the Korea Government (MOTIE) (P0017124, HRD Program for Industrial Innovation), and by the MOTIE (Ministry of Trade, Industry, and Energy) in Korea, under the Fostering Global Talents for Innovative Growth Program (P0017308) supervised by the Korea Institute for Advancement of Technology (KIAT). Additionally, the present research has been conducted by the Excellent Researcher Support Project of Kwangwoon University in 2023.</funding-statement>
      </funding-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="intro" id="sec1-sensors-24-03980">
      <title>1. Introduction</title>
      <p>Respiration is a fundamental biological process that absorbs oxygen and eliminates carbon dioxide via the process of inhalation and exhalation [<xref ref-type="bibr" rid="B1-sensors-24-03980">1</xref>]. Respiratory information provides reliable data for detecting changes in a patient’s health status, along with other vital signs such as body temperature (BT), blood pressure (BP), and heart rate (HR). In particular, respiratory rate (RR), which is the number of breaths per minute, is an important indicator obtained from various biomedical signals, such as photoplethysmogram (PPG) and electrocardiogram (ECG) signals, to assess the clinical status of patients. In addition, the variability of the continuous respiratory signal could be utilized to prevent not only respiratory disorders and lung diseases but also cardiac arrest [<xref ref-type="bibr" rid="B1-sensors-24-03980">1</xref>,<xref ref-type="bibr" rid="B2-sensors-24-03980">2</xref>,<xref ref-type="bibr" rid="B3-sensors-24-03980">3</xref>,<xref ref-type="bibr" rid="B4-sensors-24-03980">4</xref>].</p>
      <p>The PPG signal provides respiratory information, and it is a frequently utilized biomedical signal in RR estimation because the measurement method is convenient, low cost, and noninvasive [<xref ref-type="bibr" rid="B5-sensors-24-03980">5</xref>,<xref ref-type="bibr" rid="B6-sensors-24-03980">6</xref>]. PPG sensors measure changes in the blood volume from vessels near the skin, thereby acquiring information related to various vital signs [<xref ref-type="bibr" rid="B7-sensors-24-03980">7</xref>]. The frequency range of a typical PPG signal is between 0.1 and 5 Hz. For accurate BP estimation, typical cut-off frequencies range from 0.3 to 4.5 Hz, whereas cut-off frequencies between 0.4 and 3 Hz are required for HR estimation [<xref ref-type="bibr" rid="B8-sensors-24-03980">8</xref>,<xref ref-type="bibr" rid="B9-sensors-24-03980">9</xref>]. In healthy adults, the RR ranges from 12 to 20 breaths per minute [<xref ref-type="bibr" rid="B10-sensors-24-03980">10</xref>,<xref ref-type="bibr" rid="B11-sensors-24-03980">11</xref>]. This enables the detection of most of the RR information at a relatively lower frequency than the frequencies associated with blood pressure and heart rate.</p>
      <p>Previous studies related to PPG-based RR estimation have focused on two approaches: signal processing and deep learning-based RR estimation [<xref ref-type="bibr" rid="B12-sensors-24-03980">12</xref>,<xref ref-type="bibr" rid="B13-sensors-24-03980">13</xref>]. In signal processing-based approaches, frequency analysis is employed to extract from a PPG signal the components in the frequency domain that are associated with respiration. There are various frequency analysis methods such as the empirical mode decomposition (EMD) and wavelet transform [<xref ref-type="bibr" rid="B14-sensors-24-03980">14</xref>,<xref ref-type="bibr" rid="B15-sensors-24-03980">15</xref>,<xref ref-type="bibr" rid="B16-sensors-24-03980">16</xref>,<xref ref-type="bibr" rid="B17-sensors-24-03980">17</xref>]. In addition, methods such as respiratory-induced intensity variation (RIIV), respiratory-induced amplitude variation (RIAV), and respiratory-induced frequency variation (RIFV) [<xref ref-type="bibr" rid="B12-sensors-24-03980">12</xref>,<xref ref-type="bibr" rid="B18-sensors-24-03980">18</xref>] detect the optimal frequency band by decomposing the PPG signal, which modulates the PPG signal caused by respiration.</p>
      <p>With the advancement of deep learning technology, RR estimation methods using various network structures have been proposed. For a robust estimation, Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>] proposed an end-to-end convolutional neural network (CNN) with a residual block. They used three convolution blocks in parallel to extract various features related to respiration. All blocks in their network were composed of 1-D convolution blocks, leading to a reduction in the network size. Chowdhury et al. [<xref ref-type="bibr" rid="B20-sensors-24-03980">20</xref>] also proposed a lightweight deep learning network for RR prediction. They added a projection layer at the front of the network to reduce the size of the input, followed by a residual module with depth-wise separable convolution blocks for a lightweight structure [<xref ref-type="bibr" rid="B21-sensors-24-03980">21</xref>]. Spiking neural networks (SNNs) have recently attracted the attention of researchers, as an alternative to lightweight deep neural networks (DNNs) for real-time monitoring, owing to their low computational cost and high energy efficiency [<xref ref-type="bibr" rid="B21-sensors-24-03980">21</xref>,<xref ref-type="bibr" rid="B22-sensors-24-03980">22</xref>,<xref ref-type="bibr" rid="B23-sensors-24-03980">23</xref>,<xref ref-type="bibr" rid="B24-sensors-24-03980">24</xref>].</p>
      <p>SNNs are brain-inspired third-generation models that mimic neuronal dynamics [<xref ref-type="bibr" rid="B25-sensors-24-03980">25</xref>]. <xref ref-type="fig" rid="sensors-24-03980-f001">Figure 1</xref> illustrates the differences between a DNN and SNN. In contrast to DNNs, which propagate real-valued output, SNNs employ a discrete event-driven action potential called ‘spike trains’ as a temporal input and output. To generate spike trains as inputs for SNNs, real-value inputs are converted using various spike-encoding methods to transmit information [<xref ref-type="bibr" rid="B26-sensors-24-03980">26</xref>,<xref ref-type="bibr" rid="B27-sensors-24-03980">27</xref>,<xref ref-type="bibr" rid="B28-sensors-24-03980">28</xref>]. Traditional spike-encoding schemes are classified into two categories: rate and temporal encoding. These encoding methods are widely used in SNN studies to convert visual information into spike trains. Rate encoding employs the probabilistic approach of the Poisson process, where a spike probabilistically occurs through a stimulus, such as pixel values in image data or the power spectrum in the frequency domain of time-series data. In contrast, temporal encoding focuses on the timing of the spike occurrence rather than the frequency of the encoding information. To manage the temporal spike trains, a biological spiking neuron model was incorporated into the SNNs. Temporal information is transmitted through accumulation and firing based on a threshold value in the spiking neuron, which leads to output spike trains that are directed towards the next neurons.</p>
      <p>SNNs suffer from information loss during the encoding process [<xref ref-type="bibr" rid="B29-sensors-24-03980">29</xref>,<xref ref-type="bibr" rid="B30-sensors-24-03980">30</xref>,<xref ref-type="bibr" rid="B31-sensors-24-03980">31</xref>]. In addition, the non-differentiable nature of spike trains imposes limitations on learning in SNNs. Therefore, SNNs have been studied mainly in classification rather than regression fields [<xref ref-type="bibr" rid="B32-sensors-24-03980">32</xref>]. Recently, multiple studies have been conducted to combine the learning mechanisms and network structures of SNNs and DNNs for achieving a performance comparable to that of DNNs while maintaining energy efficiency. Sengupta et al. [<xref ref-type="bibr" rid="B33-sensors-24-03980">33</xref>] proposed a deep spiking neural network (DSNN) with VGG [<xref ref-type="bibr" rid="B34-sensors-24-03980">34</xref>] and residual architectures [<xref ref-type="bibr" rid="B35-sensors-24-03980">35</xref>]. To overcome the inherent challenges of SNNs, they adopted an artificial neural network (ANN)-to-spiking neural network conversion method. They conducted pretraining under the ReLU-based ANN structure, and then converted the weights for the initialization of the SNNs; this preserves the weights from the ANN, minimizes information loss, and enhances performance. Despite the loss of information, Guerrero et al. [<xref ref-type="bibr" rid="B36-sensors-24-03980">36</xref>] evaluated an event-based regression problem using a DSNN. They utilized the temporal relationship of continuous spike patterns via a recurrent neural network (RNN)-like neuron model to demonstrate the feasibility of regression applications.</p>
      <p>In this study, we proposed an SNN framework for RR estimation. In addition to the energy efficiency advantages of SNNs, an SNN architecture combined with CNNs was adopted to ensure accurate performance. The contributions of this study are summarized as follows:<list list-type="bullet"><list-item><p>We designed an end-to-end SNN architecture using a feedback-based neuronal model. To the best of our knowledge, this is the first regression study that applies end-to-end SNN to real-world PPG data.</p></list-item><list-item><p>We employed a direct encoding method to convert real-valued PPG segments into spatial–temporal spike trains. We generated explainable spike trains for RR estimation via trainable convolution blocks with a biological neuron model.</p></list-item><list-item><p>We compared the proposed model with other deep learning methods and demonstrated that the proposed model had an accuracy comparable to that of existing DNN models while being more energy efficient.</p></list-item></list></p>
      <p>The remainder of this paper is structured as follows: <xref ref-type="sec" rid="sec2-sensors-24-03980">Section 2</xref> illustrates the proposed network structure and methodology. <xref ref-type="sec" rid="sec3-sensors-24-03980">Section 3</xref> presents the experimental settings for evaluating the proposed model and compares the benchmark results with other DNN architectures. <xref ref-type="sec" rid="sec4-sensors-24-03980">Section 4</xref> discusses further details of the proposed model and analyzes the experimental results. Finally, <xref ref-type="sec" rid="sec5-sensors-24-03980">Section 5</xref> concludes the paper and suggests directions for future research.</p>
    </sec>
    <sec id="sec2-sensors-24-03980">
      <title>2. Materials and Method</title>
      <sec id="sec2dot1-sensors-24-03980">
        <title>2.1. Data and Preprocessing</title>
        <p>To evaluate the proposed model, the BIDMC PPG and Respiration dataset [<xref ref-type="bibr" rid="B37-sensors-24-03980">37</xref>] from PhysioNet [<xref ref-type="bibr" rid="B38-sensors-24-03980">38</xref>], containing signals extracted from the MIMIC II matched waveform database [<xref ref-type="bibr" rid="B39-sensors-24-03980">39</xref>], was used. The dataset consisted of 53 recordings of PPG and impedance respiratory signals acquired from adult patients aged 19–90 years at the Beth Israel Deaconess Medical Center (Boston, MA, USA). Each recording was made for 8 min and sampled at 125 Hz. The dataset was collected by an analog-to-digital converter (ADC) with 16-bit precision.</p>
        <p>In this study, the reference RR was obtained from the annotations of the dataset, which were sampled at 1 Hz. To minimize the influence of other components, a bandpass filter with cut-off frequencies of 0.1 and 0.6 Hz (6–36 breaths per minute) was used in the extraction of respiratory information. Furthermore, to ensure sufficient data for training and validation, we applied a data augmentation strategy. We augmented the data by overlapping the PPG signals at 1 s intervals. For each 16 s PPG segment, we obtained the next overlapping PPG segment by shifting 125 data points. As a result, the number of segments was increased 15 times through the data augmentation.</p>
      </sec>
      <sec id="sec2dot2-sensors-24-03980">
        <title>2.2. Spiking Neuron Model</title>
        <p>Various biologically plausible spiking neuron models, such as the Hodgkin–Huxley (HH), Izhikevich, integrate-and-fire (IF), and leaky integrate-and-fire (LIF), have been proposed to transmit information converted into spike trains [<xref ref-type="bibr" rid="B40-sensors-24-03980">40</xref>,<xref ref-type="bibr" rid="B41-sensors-24-03980">41</xref>,<xref ref-type="bibr" rid="B42-sensors-24-03980">42</xref>]. In particular, the IF and LIF neuron models have been employed in numerous studies to leverage the advantages of both biological plausibility and computational efficiency. In this section, we describe the two neuron models used in the proposed network: soft-reset IF and recurrent IF neuron models.</p>
        <p>(1) Soft-reset IF neuron model: This neuron model is utilized in the spike encoder to minimize the information loss during spike conversion. <xref ref-type="fig" rid="sensors-24-03980-f002">Figure 2</xref> shows the differences between the hard and soft-reset mechanisms. In the hard-reset approach, the membrane potential is reset to a fixed reset voltage whenever it exceeds the threshold, regardless of the amount by which the threshold is exceeded. However, in the soft-reset approach, the membrane potential is reduced by the threshold upon firing, so that any surplus potential above the threshold is retained. The soft-reset IF neuron model is defined as follows:<disp-formula id="FD1-sensors-24-03980"><label>(1)</label><mml:math id="mm1" display="block"><mml:semantics><mml:mrow><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfenced separators="" open="{" close=""><mml:mtable><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mn>1</mml:mn><mml:mo>,</mml:mo></mml:mrow></mml:mtd><mml:mtd columnalign="center"><mml:mrow><mml:mi>if</mml:mi><mml:mspace width="4.pt"/><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>−</mml:mo><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>≥</mml:mo><mml:mn>0</mml:mn></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd columnalign="center"><mml:mrow><mml:mn>0</mml:mn><mml:mo>,</mml:mo></mml:mrow></mml:mtd><mml:mtd columnalign="center"><mml:mrow><mml:mi>if</mml:mi><mml:mspace 
width="4.pt"/><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>−</mml:mo><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>&lt;</mml:mo><mml:mn>0</mml:mn></mml:mrow></mml:mtd></mml:mtr></mml:mtable></mml:mfenced></mml:mrow></mml:semantics></mml:math></disp-formula>
        <disp-formula id="FD2-sensors-24-03980"><label>(2)</label><mml:math id="mm2" display="block"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>−</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>−</mml:mo><mml:msub><mml:mi>s</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>−</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>−</mml:mo><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub><mml:mo>)</mml:mo></mml:mrow><mml:msub><mml:mi>s</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        <disp-formula id="FD3-sensors-24-03980"><label>(3)</label><mml:math id="mm3" display="block"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>s</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>.</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>Equation (<xref ref-type="disp-formula" rid="FD1-sensors-24-03980">1</xref>) explains the Heaviside step function for spike activation. 
<inline-formula><mml:math id="mm4"><mml:semantics><mml:mrow><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:semantics></mml:math></inline-formula> denotes the output spike train, <inline-formula><mml:math id="mm5"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:semantics></mml:math></inline-formula> is the membrane potential at the <inline-formula><mml:math id="mm6"><mml:semantics><mml:msub><mml:mi>t</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> time step in the <inline-formula><mml:math id="mm7"><mml:semantics><mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> layer, and <inline-formula><mml:math id="mm8"><mml:semantics><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> is a constant threshold voltage set as hyperparameter. If the membrane potential reaches the threshold, an output spike train is fired. 
Equation (<xref ref-type="disp-formula" rid="FD2-sensors-24-03980">2</xref>) expresses the soft-reset mechanism, where the membrane potential <inline-formula><mml:math id="mm9"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:semantics></mml:math></inline-formula> depends on the Equation (<xref ref-type="disp-formula" rid="FD1-sensors-24-03980">1</xref>). If the spike is fired, the reset voltage is set to the difference between the previous membrane potential and the threshold; otherwise, it is maintained at the current value. Equation (<xref ref-type="disp-formula" rid="FD3-sensors-24-03980">3</xref>) expresses the IF neuron model, where <inline-formula><mml:math id="mm10"><mml:semantics><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> denotes the trainable weights between the <inline-formula><mml:math id="mm11"><mml:semantics><mml:msub><mml:mi>i</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> and <inline-formula><mml:math id="mm12"><mml:semantics><mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> layers.</p>
        <p>(2) Recurrent IF neuron model: The conventional IF neuron model in Equation (<xref ref-type="disp-formula" rid="FD3-sensors-24-03980">3</xref>) accumulates the temporal information of the spike trains for sequential processing. However, there is no direct dependency among the time instances. In other words, updating the neuron from the current timestep is not influenced by the information from the previous timesteps. We utilized the feedback-based IF neuron model to incorporate information from the previous timestep and update of the current state of the neuron, whose mathematical model is defined in Equation (<xref ref-type="disp-formula" rid="FD4-sensors-24-03980">4</xref>):<disp-formula id="FD4-sensors-24-03980"><label>(4)</label><mml:math id="mm13" display="block"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>,</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:msub><mml:mi>s</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>s</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        where <inline-formula><mml:math id="mm14"><mml:semantics><mml:msub><mml:mi>W</mml:mi><mml:mrow><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> denotes the recurrent weight of the <inline-formula><mml:math id="mm15"><mml:semantics><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:semantics></mml:math></inline-formula>th layer. It leverages the relationship between the time instances by merging the information from the current spike trains in the <inline-formula><mml:math id="mm16"><mml:semantics><mml:mrow><mml:mo>(</mml:mo><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:semantics></mml:math></inline-formula>th layer with the previous spike trains in the <italic>i</italic>th layer.</p>
      </sec>
      <sec id="sec2dot3-sensors-24-03980">
        <title>2.3. Spike Encoding</title>
        <p>Spike encoding is a crucial step in the processing of real-valued data using spike trains. The effective conversion of information into spike trains with minimal loss is crucial to the performance of the SNN model. Rate and temporal encoding methods have demonstrated good performance in classification tasks. Nevertheless, these traditional methods have limitations owing to information loss during the encoding process. In this study, we used a combination of direct encoding approaches and convolution to minimize the loss between the model prediction and its ground truth, enabling the direct conversion of PPG segments into spike trains without the need for additional processing steps.</p>
        <p>We adopted a single-layer trainable 1D-CNN to encode information specifically related to respiration. The proposed method efficiently encoded only the respiratory information by extracting the temporal features of the PPG signal, which were obtained via trained convolution filters. Spike trains were generated through the accumulation and firing of respiratory information using a soft-reset IF neuron model that enables encoding with minimal loss.</p>
      </sec>
      <sec id="sec2dot4-sensors-24-03980">
        <title>2.4. Surrogate Gradient Learning</title>
        <p>The most critical challenge in training SNNs is the non-differentiability of the spike activation function [<xref ref-type="bibr" rid="B43-sensors-24-03980">43</xref>]. Backpropagation learning, which relies on differentiation, is typically used in the standard learning procedure for DNNs. However, the Heaviside step function was used for spike activation, as described in Equation (<xref ref-type="disp-formula" rid="FD1-sensors-24-03980">1</xref>). It imposes constraints on backpropagation learning owing to its non-differentiability at the instance when <inline-formula><mml:math id="mm17"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:semantics></mml:math></inline-formula> equals <inline-formula><mml:math id="mm18"><mml:semantics><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula>. To address this limitation, a surrogate gradient learning method is proposed, which introduces a differentiable surrogate function that approximates the behavior of the discontinuous Heaviside step function. The surrogate function enables the utilization of optimization techniques based on gradients, thereby facilitating the training process for SNNs, as described in Equation (<xref ref-type="disp-formula" rid="FD5-sensors-24-03980">5</xref>), with its first derivative given in Equation (<xref ref-type="disp-formula" rid="FD6-sensors-24-03980">6</xref>).
        <disp-formula id="FD5-sensors-24-03980"><label>(5)</label><mml:math id="mm19" display="block"><mml:semantics><mml:mrow><mml:mi>S</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mi>x</mml:mi><mml:mrow><mml:mi>α</mml:mi><mml:msqrt><mml:mrow><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:mi>x</mml:mi><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:msqrt></mml:mrow></mml:mfrac><mml:mo>+</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        <disp-formula id="FD6-sensors-24-03980"><label>(6)</label><mml:math id="mm20" display="block"><mml:semantics><mml:mrow><mml:msup><mml:mi>S</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>x</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>α</mml:mi><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mn>1</mml:mn><mml:mo>+</mml:mo><mml:msup><mml:mi>x</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mo>)</mml:mo></mml:mrow><mml:mfrac><mml:mn>3</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:msup></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        where <italic>x</italic> denotes the <inline-formula><mml:math id="mm21"><mml:semantics><mml:mrow><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mo>+</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>−</mml:mo><mml:msub><mml:mi>V</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mi>h</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:semantics></mml:math></inline-formula>, and <inline-formula><mml:math id="mm22"><mml:semantics><mml:mi>α</mml:mi></mml:semantics></mml:math></inline-formula> is the scaling parameter to adjust the slope.</p>
        <p><xref ref-type="fig" rid="sensors-24-03980-f003">Figure 3</xref>a illustrates the Heaviside step function and approximate functions corresponding to various parameter values. The parameter <inline-formula><mml:math id="mm23"><mml:semantics><mml:mi>α</mml:mi></mml:semantics></mml:math></inline-formula> is experimentally chosen as 0.65, which best approximates the Heaviside step function. <xref ref-type="fig" rid="sensors-24-03980-f003">Figure 3</xref>b depicts the gradient functions corresponding to the functions shown in <xref ref-type="fig" rid="sensors-24-03980-f003">Figure 3</xref>a. By replacing the non-differentiable spike activation function with the proposed surrogate function, the network can be effectively trained in an approximated environment.</p>
      </sec>
      <sec id="sec2dot5-sensors-24-03980">
        <title>2.5. Network Structure</title>
        <p>To overcome the limitations of SNN prediction, a CNN-SNN architecture is proposed that combines convolution operations with the SNN architecture. The proposed network consists of three layers: a spike-encoding layer, spike-hidden layer, and spike-decoding layer; the spike hidden layer was employed iteratively twice. The information regarding the input signal is continuously processed across the <italic>T</italic> time steps within the network to ensure an accurate interpretation of the input signal. Consequently, the average of the output values over the <italic>T</italic> time steps is calculated to predict the RR. The proposed network paradigm is illustrated in <xref ref-type="fig" rid="sensors-24-03980-f004">Figure 4</xref>.</p>
        <p><bold>Spike Encoding Layer:</bold> In traditional encoding methods, the input data are transformed into spike trains before being fed into neural networks. In contrast, this study employs a trainable machine learning-based encoding method that directly utilizes a neural network for spike conversion. The features related to respiration were extracted through convolution operations. Subsequently, the soft-reset IF neurons described in Equations (1)–(3) were employed to generate spike trains.</p>
        <p><bold>Spike Hidden Layer:</bold> This layer includes two convolution blocks. Each convolution block comprises a 1-D convolution, batch normalization, and recurrent IF neurons. The number of hidden layers was experimentally determined, since increasing the number of hidden layers causes a loss of accuracy.</p>
        <p><bold>Spike Decoding Layer:</bold> This layer is composed of a pooling layer, recurrent IF neurons, and a fully connected layer. A pooling operation was applied to aggregate the information and simultaneously reduce the number of spatial features. The respiratory information converted into spike form from the hidden layer was analyzed to estimate the RR.</p>
      </sec>
      <sec id="sec2dot6-sensors-24-03980">
        <title>2.6. Model Evaluation</title>
        <p>To assess the model performance, we adopted Pearson’s Correlation Coefficient (PCC) and Mean Absolute Error (MAE) in breaths per minute, for different sizes of the PPG segment [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>]:<disp-formula id="FD7-sensors-24-03980"><label>(7)</label><mml:math id="mm24" display="block"><mml:semantics><mml:mrow><mml:mi>P</mml:mi><mml:mi>C</mml:mi><mml:mi>C</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mo>∑</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mi>Y</mml:mi><mml:mo>−</mml:mo><mml:mover><mml:mi>Y</mml:mi><mml:mo>¯</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mo>−</mml:mo><mml:mover><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mo>¯</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow></mml:mrow><mml:mrow><mml:msqrt><mml:mrow><mml:mo>∑</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:mi>Y</mml:mi><mml:mo>−</mml:mo><mml:mover><mml:mi>Y</mml:mi><mml:mo>¯</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:msqrt><mml:msqrt><mml:mrow><mml:mo>∑</mml:mo><mml:msup><mml:mrow><mml:mo>(</mml:mo><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mo>−</mml:mo><mml:mover><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mo>¯</mml:mo></mml:mover><mml:mo>)</mml:mo></mml:mrow><mml:mn>2</mml:mn></mml:msup></mml:mrow></mml:msqrt></mml:mrow></mml:mfrac><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        <disp-formula id="FD8-sensors-24-03980"><label>(8)</label><mml:math id="mm25" display="block"><mml:semantics><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>E</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mi>n</mml:mi><mml:mi>W</mml:mi></mml:mrow></mml:mfrac><mml:mo>∑</mml:mo><mml:mrow><mml:mrow><mml:mo>|</mml:mo><mml:mi>Y</mml:mi><mml:mo>−</mml:mo></mml:mrow><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mrow><mml:mo>|</mml:mo></mml:mrow></mml:mrow><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        where <italic>Y</italic> and <inline-formula><mml:math id="mm26"><mml:semantics><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup></mml:semantics></mml:math></inline-formula> denote the true and estimated RR, respectively. <inline-formula><mml:math id="mm27"><mml:semantics><mml:mover><mml:mi>Y</mml:mi><mml:mo>¯</mml:mo></mml:mover></mml:semantics></mml:math></inline-formula> and <inline-formula><mml:math id="mm28"><mml:semantics><mml:mover><mml:msup><mml:mi>Y</mml:mi><mml:mo>′</mml:mo></mml:msup><mml:mo>¯</mml:mo></mml:mover></mml:semantics></mml:math></inline-formula> are the averages of the true and estimated RRs, respectively. W is the window size of the PPG segment.</p>
        <p>Furthermore, we adopted the following methods to measure the energy efficiency of the proposed SNN and DNN model [<xref ref-type="bibr" rid="B44-sensors-24-03980">44</xref>]:<disp-formula id="FD9-sensors-24-03980"><label>(9)</label><mml:math id="mm29" display="block"><mml:semantics><mml:mrow><mml:mi>E</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>D</mml:mi><mml:mi>N</mml:mi><mml:mi>N</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mo>ℓ</mml:mo><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mi>L</mml:mi></mml:munderover><mml:mi>F</mml:mi><mml:msup><mml:mi>L</mml:mi><mml:mo>ℓ</mml:mo></mml:msup><mml:mo>∗</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        <disp-formula id="FD10-sensors-24-03980"><label>(10)</label><mml:math id="mm30" display="block"><mml:semantics><mml:mrow><mml:mi>E</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>S</mml:mi><mml:mi>N</mml:mi><mml:mi>N</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mo>=</mml:mo><mml:mi>F</mml:mi><mml:msup><mml:mi>L</mml:mi><mml:mo>ℓ</mml:mo></mml:msup><mml:mo>∗</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mo>ℓ</mml:mo><mml:mo>=</mml:mo><mml:mn>2</mml:mn></mml:mrow><mml:mi>L</mml:mi></mml:munderover><mml:mi>F</mml:mi><mml:msup><mml:mi>L</mml:mi><mml:mo>ℓ</mml:mo></mml:msup><mml:mo>∗</mml:mo><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>A</mml:mi><mml:mi>C</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo></mml:mrow></mml:semantics></mml:math></disp-formula>
        where <inline-formula><mml:math id="mm31"><mml:semantics><mml:mrow><mml:mi>F</mml:mi><mml:msup><mml:mi>L</mml:mi><mml:mo>ℓ</mml:mo></mml:msup></mml:mrow></mml:semantics></mml:math></inline-formula> denotes the number of floating-point operations at layer <italic>ℓ</italic>, <inline-formula><mml:math id="mm32"><mml:semantics><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>M</mml:mi><mml:mi>A</mml:mi><mml:mi>C</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> is the energy consumption used for the multiply–accumulate (multiplication and addition) operations, and <inline-formula><mml:math id="mm33"><mml:semantics><mml:msub><mml:mi>E</mml:mi><mml:mrow><mml:mi>A</mml:mi><mml:mi>C</mml:mi></mml:mrow></mml:msub></mml:semantics></mml:math></inline-formula> is the energy consumption for the accumulate (addition) operations. To count the number of floating point operations for each layer, we utilized the ptflops library. Furthermore, we assumed that the energy consumption for the addition process was 0.1 pJ and the multiplication process was 3.1 pJ, referring to [<xref ref-type="bibr" rid="B45-sensors-24-03980">45</xref>].</p>
        <p>We divided the training and test data at the subject level. From the BIDMC respiratory dataset, 40 subjects were randomly selected for the training process and 13 of them were used for the test process. Furthermore, a five-fold cross-validation method was applied during the training process. For the benchmark test, a CNN-LSTM model with three convolution layers and one LSTM layer [<xref ref-type="bibr" rid="B46-sensors-24-03980">46</xref>], a CNN-RNN model with three convolution layers and one RNN layer [<xref ref-type="bibr" rid="B47-sensors-24-03980">47</xref>], and a VGG-8 model were chosen [<xref ref-type="bibr" rid="B34-sensors-24-03980">34</xref>]. The detailed parameter settings are presented in <xref ref-type="table" rid="sensors-24-03980-t001">Table 1</xref>.</p>
      </sec>
    </sec>
    <sec id="sec3-sensors-24-03980">
      <title>3. Experimental Results</title>
      <p>The proposed model was implemented using the SpikingJelly framework based on the PyTorch library. The model training was conducted with a batch size of 16, a learning rate of 0.0005, and the Adam optimization method in an environment with an Intel Core i7-7700 CPU at 3.60 GHz and a GeForce RTX 4070ti GPU.</p>
      <sec id="sec3dot1-sensors-24-03980">
        <title>3.1. Model Accuracy</title>
        <p><xref ref-type="table" rid="sensors-24-03980-t002">Table 2</xref> lists the PCC and MAE performance corresponding to three different window sizes of the PPG segment. Experiments were conducted with 4, 8, and 16 timesteps of the spike trains. The proposed model demonstrated outstanding performance despite an increase in the number of time steps. The optimal performance was achieved when the time step T was set to 8. Generally, the mean firing rate plays a more critical role than the patterns of neuronal firing in SNN studies of classification problems [<xref ref-type="bibr" rid="B48-sensors-24-03980">48</xref>,<xref ref-type="bibr" rid="B49-sensors-24-03980">49</xref>,<xref ref-type="bibr" rid="B50-sensors-24-03980">50</xref>]. Therefore, it was demonstrated that the performance improved with an increase in the timesteps during decision making. However, in the proposed model, longer timesteps of the spike trains did not improve performance.</p>
        <p><xref ref-type="table" rid="sensors-24-03980-t003">Table 3</xref> lists the PCC and MAE performances of the proposed model compared with those of other DNN approaches. The proposed model outperformed the CNN-RNN and VGG-8 models with MAE values of 1.37 ± 0.04 and 1.15 ± 0.07 bpm when the window sizes were 16 and 64, respectively. It also yielded better performance than the VGG-8 model, with an MAE of 1.23 ± 0.03 bpm at a window size of 32. The model of Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>] exhibited the best performance for the 16 and 32 window sizes with MAE of 1.34 ± 0.01 and 1.11 ± 0.01 bpm. The CNN-LSTM showed the best performance with MAE of 1.11 ± 0.03 bpm in 64 window size. However, the overall results of the proposed model achieved comparable performance with the CNN-LSTM, CNN-RNN and Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>] models.</p>
        <p><xref ref-type="fig" rid="sensors-24-03980-f005">Figure 5</xref>a–c display the training and validation loss curves of the proposed model and the other DNN models with window sizes of 16, 32, and 64, respectively. The x-axis represents the training and validation epochs, and the y-axis represents the mean squared error (MSE) losses. The validation losses converge in all cases, and these curves validate the reliability of the results presented in <xref ref-type="table" rid="sensors-24-03980-t002">Table 2</xref>. In particular, <xref ref-type="fig" rid="sensors-24-03980-f005">Figure 5</xref>c displays the optimal convergence compared to <xref ref-type="fig" rid="sensors-24-03980-f005">Figure 5</xref>a,b.</p>
        <p>In addition to the PCC in <xref ref-type="table" rid="sensors-24-03980-t004">Table 4</xref>, we performed visualization to evaluate the reliability of each estimated RR. <xref ref-type="fig" rid="sensors-24-03980-f006">Figure 6</xref> shows the Bland–Altman graphs between the estimated RR and ground-truth RR. <xref ref-type="fig" rid="sensors-24-03980-f006">Figure 6</xref>a,b illustrate the results for window sizes of 16 and 32 s using the best performing model, the Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>] network, as indicated in <xref ref-type="table" rid="sensors-24-03980-t004">Table 4</xref>. <xref ref-type="fig" rid="sensors-24-03980-f006">Figure 6</xref>c shows the results for a 64-second window size, using the CNN-LSTM network. <xref ref-type="fig" rid="sensors-24-03980-f006">Figure 6</xref>d–f visualize the results of the proposed model. The x-axis represents the average of two measurements, and the y-axis represents the difference between the two measurements. For both the best performing models and the proposed model, the majority of the data points are contained within the 95% confidence interval, demonstrating the reliability of the models.</p>
      </sec>
      <sec id="sec3dot2-sensors-24-03980">
        <title>3.2. Computational Cost and Energy Consumption</title>
        <p><xref ref-type="table" rid="sensors-24-03980-t004">Table 4</xref> presents the number of floating-point operations (FLOPs) and energy costs for the proposed SNN model and other DNN models. DNNs utilize MAC operations as metrics for FLOPs, whereas the proposed model employs synaptic operations [<xref ref-type="bibr" rid="B51-sensors-24-03980">51</xref>]. The proposed model showed comparable performance to other DNN models in terms of MAE performance. Furthermore, compared to the best performing model, it demonstrated 18.6, 18.7, and 64.6 times higher energy efficiency, and 1.05, 1.1, and 3.97 times lower floating-point operation counts for the window sizes of 16, 32, and 64, respectively.</p>
      </sec>
    </sec>
    <sec sec-type="discussion" id="sec4-sensors-24-03980">
      <title>4. Discussion</title>
      <p>Our approach utilized a suitable network architecture to perform a regression test from time-series medical data, whereas most other SNN studies have focused on classification problems owing to their poor accuracy performance caused by information loss. In particular, a spiking neuron model was designed with a recurrent structure similar to RNNs [<xref ref-type="bibr" rid="B47-sensors-24-03980">47</xref>], reflecting the spike information from previous time instances in the next one. Therefore, the temporal dependencies among the different time instances can be enhanced using recurrent spiking neurons with the feature extraction capabilities of CNNs. However, there is a limitation in learning long-term dependencies. To address this limitation, surrogate gradient learning has been proposed, which introduces a differentiable surrogate function that approximates the behavior of a discontinuous Heaviside step function. The surrogate function enables the utilization of optimization methods based on a gradient process, thereby conducting a training process for the SNNs.</p>
      <p>To analyze the outstanding performance of the proposed SNN model, we visualized the spike patterns derived from the PPG signals (see <xref ref-type="fig" rid="sensors-24-03980-f007">Figure 7</xref>a–c). <xref ref-type="fig" rid="sensors-24-03980-f007">Figure 7</xref> shows the results from randomly selected clean and noisy PPG signals. <xref ref-type="fig" rid="sensors-24-03980-f007">Figure 7</xref>a shows the raw PPG signals, its true respiratory signal, and the result of its spike encoding. The PPG signal was bandpass filtered into the respiratory band corresponding to 0.1–0.6 Hz. The respiratory information was perfectly captured in the spike pattern, which was generated around the peaks of the respiratory signal, indicating that it contained sufficient respiratory information. Furthermore, <xref ref-type="fig" rid="sensors-24-03980-f007">Figure 7</xref>b,c illustrate that the respiratory information was effectively represented, even with noisy PPG signals, except for the minor regions depicted in the red dashed box.</p>
      <p>We set the number of time steps to eight. In other words, the CNN extracts the features from the PPG signals, and the process of generating spikes with the soft-reset IF neurons is repeated eight times. In this process, the membrane voltage value from the previous time step is carried over to the next time step, since the neuron’s membrane voltage is not reset at each time step. Consequently, spikes are produced at the valleys of the respiratory signal in noisy PPG signals despite the application of the bandpass filter to extract the respiratory information from the low-frequency components of the PPG signal, leading to estimation errors. To minimize these errors, noise reduction methods such as smoothing filters could be applied. Furthermore, utilizing multiple convolution layers could also help to extract accurate respiratory information.</p>
    </sec>
    <sec sec-type="conclusions" id="sec5-sensors-24-03980">
      <title>5. Conclusions</title>
      <p>In this study, an SNN-based model for respiratory rate prediction was proposed and compared with deep learning models in terms of accuracy and energy cost. By enhancing the time dependency through the recurrent structure, the proposed model showed accuracy performance comparable to that of deep learning models such as CNN-LSTM, CNN-RNN, VGG-8, and state-of-the-art RR estimation networks, operating at a relatively low computational cost. As a result, the proposed model demonstrated its advantage in low power consumption with a maximum of 64.5, 52.7, 50.5, and 20 times lower energy cost compared to the deep learning models. Furthermore, the analysis of the spike patterns from the trainable spike encoder revealed that spikes were periodically generated corresponding to the respiratory patterns of the reference signals, which enhanced the reliability of the model. To the best of our knowledge, this is the first study to apply an end-to-end SNN architecture to the regression analysis of real-world PPG data, thereby validating its effectiveness in RR estimation.</p>
    </sec>
  </body>
  <back>
    <notes>
      <title>Author Contributions</title>
      <p>Conceptualization, G.Y., L.L. and C.P.; Methodology, G.Y. and K.K.K.; Software, G.Y.; Validation, G.Y. and Y.K.; Formal Analysis, G.Y. and Y.K.; Investigation, G.Y. and Y.K.; Writing—Original Draft, G.Y., Y.K. and C.P.; Writing, G.Y., Y.K. and P.H.C.; Visualization, G.Y. and Y.K.; Data curation, P.H.C. and P.A.K. Supervision, L.L. and C.P. All authors have read and agreed to the published version of the manuscript.</p>
    </notes>
    <notes>
      <title>Institutional Review Board Statement</title>
      <p>Not applicable.</p>
    </notes>
    <notes>
      <title>Informed Consent Statement</title>
      <p>Not applicable.</p>
    </notes>
    <notes>
      <title>Data Availability Statement</title>
      <p>The data presented in this study are openly available in PhysioNet at <uri>https://doi.org/10.13026/C2208R</uri>.</p>
    </notes>
    <notes notes-type="COI-statement">
      <title>Conflicts of Interest</title>
      <p>Author Ko Keun Kim was employed by the company LG Electronics. The remaining authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.</p>
    </notes>
    <ref-list>
      <title>References</title>
      <ref id="B1-sensors-24-03980">
        <label>1.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Comroe</surname>
              <given-names>J.H.</given-names>
            </name>
          </person-group>
          <article-title>Physiology of respiration</article-title>
          <source>Acad. Med.</source>
          <year>1965</year>
          <volume>40</volume>
          <fpage>887</fpage>
        </element-citation>
      </ref>
      <ref id="B2-sensors-24-03980">
        <label>2.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Fieselmann</surname>
              <given-names>J.F.</given-names>
            </name>
            <name>
              <surname>Hendryx</surname>
              <given-names>M.S.</given-names>
            </name>
            <name>
              <surname>Helms</surname>
              <given-names>C.M.</given-names>
            </name>
            <name>
              <surname>Wakefield</surname>
              <given-names>D.S.</given-names>
            </name>
          </person-group>
          <article-title>Respiratory rate predicts cardiopulmonary arrest for internal medicine inpatients</article-title>
          <source>J. Gen. Intern. Med.</source>
          <year>1993</year>
          <volume>8</volume>
          <fpage>354</fpage>
          <lpage>360</lpage>
          <pub-id pub-id-type="doi">10.1007/BF02600071</pub-id>
          <pub-id pub-id-type="pmid">8410395</pub-id>
        </element-citation>
      </ref>
      <ref id="B3-sensors-24-03980">
        <label>3.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Lim</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Carty</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Macfarlane</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Anthony</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Christian</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Dakin</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Dennis</surname>
              <given-names>P.</given-names>
            </name>
          </person-group>
          <article-title>Respiratory rate measurement in adults—How reliable is it?</article-title>
          <source>Respir. Med.</source>
          <year>2002</year>
          <volume>96</volume>
          <fpage>31</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1053/rmed.2001.1203</pub-id>
          <pub-id pub-id-type="pmid">11863207</pub-id>
        </element-citation>
      </ref>
      <ref id="B4-sensors-24-03980">
        <label>4.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Nam</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Bautista</surname>
              <given-names>J.L.</given-names>
            </name>
            <name>
              <surname>Hahm</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Shin</surname>
              <given-names>H.</given-names>
            </name>
          </person-group>
          <article-title>Recognition of Respiratory Instability using a Photoplethysmography of Wrist-watch type Wearable Device</article-title>
          <source>IEIE Trans. Smart Process. Comput.</source>
          <year>2022</year>
          <volume>11</volume>
          <fpage>97</fpage>
          <lpage>104</lpage>
        </element-citation>
      </ref>
      <ref id="B5-sensors-24-03980">
        <label>5.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Teng</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>Y.</given-names>
            </name>
          </person-group>
          <article-title>Continuous and noninvasive estimation of arterial blood pressure using a photoplethysmographic approach</article-title>
          <source>Proceedings of the 25th Annual International Conference of the IEEE Engineering in Medicine and Biology Society (IEEE Cat. No. 03CH37439)</source>
          <conf-loc>Cancun, Mexico</conf-loc>
          <conf-date>17–21 September 2003</conf-date>
          <volume>Volume 4</volume>
          <fpage>3153</fpage>
          <lpage>3156</lpage>
        </element-citation>
      </ref>
      <ref id="B6-sensors-24-03980">
        <label>6.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Rodrigues</surname>
              <given-names>E.M.</given-names>
            </name>
            <name>
              <surname>Godina</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Cabrita</surname>
              <given-names>C.M.</given-names>
            </name>
            <name>
              <surname>Catalão</surname>
              <given-names>J.P.</given-names>
            </name>
          </person-group>
          <article-title>Experimental low cost reflective type oximeter for wearable health systems</article-title>
          <source>Biomed. Signal Process. Control</source>
          <year>2017</year>
          <volume>31</volume>
          <fpage>419</fpage>
          <lpage>433</lpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2016.09.013</pub-id>
        </element-citation>
      </ref>
      <ref id="B7-sensors-24-03980">
        <label>7.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Allen</surname>
              <given-names>J.</given-names>
            </name>
          </person-group>
          <article-title>Photoplethysmography and its application in clinical physiological measurement</article-title>
          <source>Physiol. Meas.</source>
          <year>2007</year>
          <volume>28</volume>
          <fpage>R1</fpage>
          <pub-id pub-id-type="doi">10.1088/0967-3334/28/3/R01</pub-id>
          <pub-id pub-id-type="pmid">17322588</pub-id>
        </element-citation>
      </ref>
      <ref id="B8-sensors-24-03980">
        <label>8.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Haddad</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Boukhayma</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Caizzone</surname>
              <given-names>A.</given-names>
            </name>
          </person-group>
          <article-title>Continuous PPG-based blood pressure monitoring using multi-linear regression</article-title>
          <source>IEEE J. Biomed. Health Inform.</source>
          <year>2021</year>
          <volume>26</volume>
          <fpage>2096</fpage>
          <lpage>2105</lpage>
          <pub-id pub-id-type="doi">10.1109/JBHI.2021.3128229</pub-id>
          <pub-id pub-id-type="pmid">34784288</pub-id>
        </element-citation>
      </ref>
      <ref id="B9-sensors-24-03980">
        <label>9.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Islam</surname>
              <given-names>M.T.</given-names>
            </name>
            <name>
              <surname>Zabir</surname>
              <given-names>I.</given-names>
            </name>
            <name>
              <surname>Ahamed</surname>
              <given-names>S.T.</given-names>
            </name>
            <name>
              <surname>Yasar</surname>
              <given-names>M.T.</given-names>
            </name>
            <name>
              <surname>Shahnaz</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Fattah</surname>
              <given-names>S.A.</given-names>
            </name>
          </person-group>
          <article-title>A time-frequency domain approach of heart rate estimation from photoplethysmographic (PPG) signal</article-title>
          <source>Biomed. Signal Process. Control</source>
          <year>2017</year>
          <volume>36</volume>
          <fpage>146</fpage>
          <lpage>154</lpage>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2017.03.020</pub-id>
        </element-citation>
      </ref>
      <ref id="B10-sensors-24-03980">
        <label>10.</label>
        <element-citation publication-type="book">
          <person-group person-group-type="author">
            <name>
              <surname>McCance</surname>
              <given-names>K.L.</given-names>
            </name>
            <name>
              <surname>Huether</surname>
              <given-names>S.E.</given-names>
            </name>
          </person-group>
          <source>Pathophysiology: The Biologic Basis for Disease in Adults and Children</source>
          <publisher-name>Elsevier Health Sciences</publisher-name>
          <publisher-loc>Amsterdam, The Netherlands</publisher-loc>
          <year>2014</year>
        </element-citation>
      </ref>
      <ref id="B11-sensors-24-03980">
        <label>11.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Flenady</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Dwyer</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Applegarth</surname>
              <given-names>J.</given-names>
            </name>
          </person-group>
          <article-title>Accurate respiratory rates count: So should you!</article-title>
          <source>Australas. Emerg. Nurs. J.</source>
          <year>2017</year>
          <volume>20</volume>
          <fpage>45</fpage>
          <lpage>47</lpage>
          <pub-id pub-id-type="doi">10.1016/j.aenj.2016.12.003</pub-id>
        </element-citation>
      </ref>
      <ref id="B12-sensors-24-03980">
        <label>12.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Charlton</surname>
              <given-names>P.H.</given-names>
            </name>
            <name>
              <surname>Birrenkott</surname>
              <given-names>D.A.</given-names>
            </name>
            <name>
              <surname>Bonnici</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Pimentel</surname>
              <given-names>M.A.</given-names>
            </name>
            <name>
              <surname>Johnson</surname>
              <given-names>A.E.</given-names>
            </name>
            <name>
              <surname>Alastruey</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Tarassenko</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Watkinson</surname>
              <given-names>P.J.</given-names>
            </name>
            <name>
              <surname>Beale</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Clifton</surname>
              <given-names>D.A.</given-names>
            </name>
          </person-group>
          <article-title>Breathing rate estimation from the electrocardiogram and photoplethysmogram: A review</article-title>
          <source>IEEE Rev. Biomed. Eng.</source>
          <year>2017</year>
          <volume>11</volume>
          <fpage>2</fpage>
          <lpage>20</lpage>
          <pub-id pub-id-type="doi">10.1109/RBME.2017.2763681</pub-id>
        </element-citation>
      </ref>
      <ref id="B13-sensors-24-03980">
        <label>13.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Bian</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Mehta</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Selvaraj</surname>
              <given-names>N.</given-names>
            </name>
          </person-group>
          <article-title>Respiratory rate estimation using PPG: A deep learning approach</article-title>
          <source>Proceedings of the 2020 42nd Annual International Conference of the IEEE Engineering in Medicine &amp; Biology Society (EMBC)</source>
          <conf-loc>Montreal, QC, Canada</conf-loc>
          <conf-date>20–24 July 2020</conf-date>
          <fpage>5948</fpage>
          <lpage>5952</lpage>
        </element-citation>
      </ref>
      <ref id="B14-sensors-24-03980">
        <label>14.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Madhav</surname>
              <given-names>K.V.</given-names>
            </name>
            <name>
              <surname>Ram</surname>
              <given-names>M.R.</given-names>
            </name>
            <name>
              <surname>Krishna</surname>
              <given-names>E.H.</given-names>
            </name>
            <name>
              <surname>Komalla</surname>
              <given-names>N.R.</given-names>
            </name>
            <name>
              <surname>Reddy</surname>
              <given-names>K.A.</given-names>
            </name>
          </person-group>
          <article-title>Estimation of respiration rate from ECG, BP and PPG signals using empirical mode decomposition</article-title>
          <source>Proceedings of the 2011 IEEE International Instrumentation and Measurement Technology Conference</source>
          <conf-loc>Hangzhou, China</conf-loc>
          <conf-date>10–12 May 2011</conf-date>
          <fpage>1</fpage>
          <lpage>4</lpage>
        </element-citation>
      </ref>
      <ref id="B15-sensors-24-03980">
        <label>15.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Garde</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Karlen</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Dehkordi</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Ansermino</surname>
              <given-names>J.M.</given-names>
            </name>
            <name>
              <surname>Dumont</surname>
              <given-names>G.A.</given-names>
            </name>
          </person-group>
          <article-title>Empirical mode decomposition for respiratory and heart rate estimation from the photoplethysmogram</article-title>
          <source>Proceedings of the Computing in Cardiology 2013</source>
          <conf-loc>Zaragoza, Spain</conf-loc>
          <conf-date>22–25 September 2013</conf-date>
          <fpage>799</fpage>
          <lpage>802</lpage>
        </element-citation>
      </ref>
      <ref id="B16-sensors-24-03980">
        <label>16.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Lazazzera</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Carrault</surname>
              <given-names>G.</given-names>
            </name>
          </person-group>
          <article-title>Breathing rate estimation methods from PPG signals, on CAPNOBASE database</article-title>
          <source>Proceedings of the 2020 Computing in Cardiology</source>
          <conf-loc>Rimini, Italy</conf-loc>
          <conf-date>13–16 September 2020</conf-date>
          <fpage>1</fpage>
          <lpage>4</lpage>
        </element-citation>
      </ref>
      <ref id="B17-sensors-24-03980">
        <label>17.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Pankaj</surname>
            </name>
            <name>
              <surname>Kumar</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Kumar</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Komaragiri</surname>
              <given-names>R.</given-names>
            </name>
          </person-group>
          <article-title>Optimized deep neural network models for blood pressure classification using Fourier analysis-based time–frequency spectrogram of photoplethysmography signal</article-title>
          <source>Biomed. Eng. Lett.</source>
          <year>2023</year>
          <volume>13</volume>
          <fpage>739</fpage>
          <lpage>750</lpage>
          <pub-id pub-id-type="doi">10.1007/s13534-023-00296-6</pub-id>
          <pub-id pub-id-type="pmid">37872982</pub-id>
        </element-citation>
      </ref>
      <ref id="B18-sensors-24-03980">
        <label>18.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Nilsson</surname>
              <given-names>L.M.</given-names>
            </name>
          </person-group>
          <article-title>Respiration signals from photoplethysmography</article-title>
          <source>Anesth. Analg.</source>
          <year>2013</year>
          <volume>117</volume>
          <fpage>859</fpage>
          <lpage>865</lpage>
          <pub-id pub-id-type="doi">10.1213/ANE.0b013e31828098b2</pub-id>
          <pub-id pub-id-type="pmid">23449854</pub-id>
        </element-citation>
      </ref>
      <ref id="B19-sensors-24-03980">
        <label>19.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Osathitporn</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Sawadwuthikul</surname>
              <given-names>G.</given-names>
            </name>
            <name>
              <surname>Thuwajit</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Ueafuea</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Mateepithaktham</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Kunaseth</surname>
              <given-names>N.</given-names>
            </name>
            <name>
              <surname>Choksatchawathi</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Punyabukkana</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Mignot</surname>
              <given-names>E.</given-names>
            </name>
            <name>
              <surname>Wilaiprasitporn</surname>
              <given-names>T.</given-names>
            </name>
          </person-group>
          <article-title>RRWaveNet: A Compact End-to-End Multi-Scale Residual CNN for Robust PPG Respiratory Rate Estimation</article-title>
          <source>IEEE Internet Things J.</source>
          <year>2023</year>
          <volume>10</volume>
          <fpage>15943</fpage>
          <lpage>15952</lpage>
          <pub-id pub-id-type="doi">10.1109/JIOT.2023.3265980</pub-id>
        </element-citation>
      </ref>
      <ref id="B20-sensors-24-03980">
        <label>20.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Chowdhury</surname>
              <given-names>M.H.</given-names>
            </name>
            <name>
              <surname>Shuzan</surname>
              <given-names>M.N.I.</given-names>
            </name>
            <name>
              <surname>Chowdhury</surname>
              <given-names>M.E.</given-names>
            </name>
            <name>
              <surname>Reaz</surname>
              <given-names>M.B.I.</given-names>
            </name>
            <name>
              <surname>Mahmud</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Al Emadi</surname>
              <given-names>N.</given-names>
            </name>
            <name>
              <surname>Ayari</surname>
              <given-names>M.A.</given-names>
            </name>
            <name>
              <surname>Ali</surname>
              <given-names>S.H.M.</given-names>
            </name>
            <name>
              <surname>Bakar</surname>
              <given-names>A.A.A.</given-names>
            </name>
            <name>
              <surname>Rahman</surname>
              <given-names>S.M.</given-names>
            </name>
            <etal/>
          </person-group>
          <article-title>Lightweight End-to-End Deep Learning Solution for Estimating the Respiration Rate from Photoplethysmogram Signal</article-title>
          <source>Bioengineering</source>
          <year>2022</year>
          <volume>9</volume>
          <elocation-id>558</elocation-id>
          <pub-id pub-id-type="doi">10.3390/bioengineering9100558</pub-id>
          <pub-id pub-id-type="pmid">36290527</pub-id>
        </element-citation>
      </ref>
      <ref id="B21-sensors-24-03980">
        <label>21.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Chollet</surname>
              <given-names>F.</given-names>
            </name>
          </person-group>
          <article-title>Xception: Deep learning with depthwise separable convolutions</article-title>
          <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source>
          <conf-loc>Honolulu, HI, USA</conf-loc>
          <conf-date>21–26 July 2017</conf-date>
          <fpage>1251</fpage>
          <lpage>1258</lpage>
        </element-citation>
      </ref>
      <ref id="B22-sensors-24-03980">
        <label>22.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Yamazaki</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Vo-Ho</surname>
              <given-names>V.K.</given-names>
            </name>
            <name>
              <surname>Bulsara</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Le</surname>
              <given-names>N.</given-names>
            </name>
          </person-group>
          <article-title>Spiking neural networks and their applications: A Review</article-title>
          <source>Brain Sci.</source>
          <year>2022</year>
          <volume>12</volume>
          <elocation-id>863</elocation-id>
          <pub-id pub-id-type="doi">10.3390/brainsci12070863</pub-id>
          <pub-id pub-id-type="pmid">35884670</pub-id>
        </element-citation>
      </ref>
      <ref id="B23-sensors-24-03980">
        <label>23.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Xing</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Hou</surname>
              <given-names>Z.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname>Shi</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Yuan</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>F.</given-names>
            </name>
            <name>
              <surname>Liang</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>Z.</given-names>
            </name>
            <name>
              <surname>Yan</surname>
              <given-names>L.</given-names>
            </name>
          </person-group>
          <article-title>Accurate ECG classification based on spiking neural network and attentional mechanism for real-time implementation on personal portable devices</article-title>
          <source>Electronics</source>
          <year>2022</year>
          <volume>11</volume>
          <elocation-id>1889</elocation-id>
          <pub-id pub-id-type="doi">10.3390/electronics11121889</pub-id>
        </element-citation>
      </ref>
      <ref id="B24-sensors-24-03980">
        <label>24.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Yang</surname>
              <given-names>J.</given-names>
            </name>
          </person-group>
          <article-title>Accurate Prediction and Analysis of College Students from Online Learning Behavior Data</article-title>
          <source>IEIE Trans. Smart Process. Comput.</source>
          <year>2023</year>
          <volume>12</volume>
          <fpage>404</fpage>
          <lpage>411</lpage>
          <pub-id pub-id-type="doi">10.5573/IEIESPC.2023.12.5.404</pub-id>
        </element-citation>
      </ref>
      <ref id="B25-sensors-24-03980">
        <label>25.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Rajagopal</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Karthick</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Meenalochini</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Kalaichelvi</surname>
              <given-names>T.</given-names>
            </name>
          </person-group>
          <article-title>Deep Convolutional Spiking Neural Network optimized with Arithmetic optimization algorithm for lung disease detection using chest X-ray images</article-title>
          <source>Biomed. Signal Process. Control</source>
          <year>2023</year>
          <volume>79</volume>
          <elocation-id>104197</elocation-id>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2022.104197</pub-id>
        </element-citation>
      </ref>
      <ref id="B26-sensors-24-03980">
        <label>26.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Maass</surname>
              <given-names>W.</given-names>
            </name>
          </person-group>
          <article-title>Networks of spiking neurons: The third generation of neural network models</article-title>
          <source>Neural Netw.</source>
          <year>1997</year>
          <volume>10</volume>
          <fpage>1659</fpage>
          <lpage>1671</lpage>
          <pub-id pub-id-type="doi">10.1016/S0893-6080(97)00011-7</pub-id>
        </element-citation>
      </ref>
      <ref id="B27-sensors-24-03980">
        <label>27.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Theunissen</surname>
              <given-names>F.</given-names>
            </name>
            <name>
              <surname>Miller</surname>
              <given-names>J.P.</given-names>
            </name>
          </person-group>
          <article-title>Temporal encoding in nervous systems: A rigorous definition</article-title>
          <source>J. Comput. Neurosci.</source>
          <year>1995</year>
          <volume>2</volume>
          <fpage>149</fpage>
          <lpage>162</lpage>
          <pub-id pub-id-type="doi">10.1007/BF00961885</pub-id>
        </element-citation>
      </ref>
      <ref id="B28-sensors-24-03980">
        <label>28.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Victor</surname>
              <given-names>J.D.</given-names>
            </name>
          </person-group>
          <article-title>Spike train metrics</article-title>
          <source>Curr. Opin. Neurobiol.</source>
          <year>2005</year>
          <volume>15</volume>
          <fpage>585</fpage>
          <lpage>592</lpage>
          <pub-id pub-id-type="doi">10.1016/j.conb.2005.08.002</pub-id>
          <pub-id pub-id-type="pmid">16140522</pub-id>
        </element-citation>
      </ref>
      <ref id="B29-sensors-24-03980">
        <label>29.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Auge</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Hille</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Mueller</surname>
              <given-names>E.</given-names>
            </name>
            <name>
              <surname>Knoll</surname>
              <given-names>A.</given-names>
            </name>
          </person-group>
          <article-title>A survey of encoding techniques for signal processing in spiking neural networks</article-title>
          <source>Neural Process. Lett.</source>
          <year>2021</year>
          <volume>53</volume>
          <fpage>4693</fpage>
          <lpage>4710</lpage>
          <pub-id pub-id-type="doi">10.1007/s11063-021-10562-2</pub-id>
        </element-citation>
      </ref>
      <ref id="B30-sensors-24-03980">
        <label>30.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Wu</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Chua</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>H.</given-names>
            </name>
            <name>
              <surname>Tan</surname>
              <given-names>K.C.</given-names>
            </name>
          </person-group>
          <article-title>A spiking neural network framework for robust sound classification</article-title>
          <source>Front. Neurosci.</source>
          <year>2018</year>
          <volume>12</volume>
          <elocation-id>836</elocation-id>
          <pub-id pub-id-type="doi">10.3389/fnins.2018.00836</pub-id>
          <pub-id pub-id-type="pmid">30510500</pub-id>
        </element-citation>
      </ref>
      <ref id="B31-sensors-24-03980">
        <label>31.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Yan</surname>
              <given-names>Z.</given-names>
            </name>
            <name>
              <surname>Zhou</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Wong</surname>
              <given-names>W.F.</given-names>
            </name>
          </person-group>
          <article-title>Energy efficient ECG classification with spiking neural network</article-title>
          <source>Biomed. Signal Process. Control</source>
          <year>2021</year>
          <volume>63</volume>
          <elocation-id>102170</elocation-id>
          <pub-id pub-id-type="doi">10.1016/j.bspc.2020.102170</pub-id>
        </element-citation>
      </ref>
      <ref id="B32-sensors-24-03980">
        <label>32.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Balakrishnan</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Baskaran</surname>
              <given-names>B.</given-names>
            </name>
            <name>
              <surname>Vivekanan</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Gokul</surname>
              <given-names>P.</given-names>
            </name>
          </person-group>
          <article-title>Binarized Spiking Neural Networks Optimized with Color Harmony Algorithm for Liver Cancer Classification</article-title>
          <source>IEIE Trans. Smart Process. Comput.</source>
          <year>2023</year>
          <volume>12</volume>
          <fpage>502</fpage>
          <lpage>510</lpage>
          <pub-id pub-id-type="doi">10.5573/IEIESPC.2023.12.6.502</pub-id>
        </element-citation>
      </ref>
      <ref id="B33-sensors-24-03980">
        <label>33.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Sengupta</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Ye</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Wang</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Roy</surname>
              <given-names>K.</given-names>
            </name>
          </person-group>
          <article-title>Going deeper in spiking neural networks: VGG and residual architectures</article-title>
          <source>Front. Neurosci.</source>
          <year>2019</year>
          <volume>13</volume>
          <elocation-id>95</elocation-id>
          <pub-id pub-id-type="doi">10.3389/fnins.2019.00095</pub-id>
          <pub-id pub-id-type="pmid">30899212</pub-id>
        </element-citation>
      </ref>
      <ref id="B34-sensors-24-03980">
        <label>34.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Simonyan</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Zisserman</surname>
              <given-names>A.</given-names>
            </name>
          </person-group>
          <article-title>Very deep convolutional networks for large-scale image recognition</article-title>
          <source>arXiv</source>
          <year>2014</year>
          <pub-id pub-id-type="arxiv">1409.1556</pub-id>
        </element-citation>
      </ref>
      <ref id="B35-sensors-24-03980">
        <label>35.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>He</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname>Ren</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Sun</surname>
              <given-names>J.</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning for image recognition</article-title>
          <source>Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition</source>
          <conf-loc>Las Vegas, NV, USA</conf-loc>
          <conf-date>27–30 June 2016</conf-date>
          <fpage>770</fpage>
          <lpage>778</lpage>
        </element-citation>
      </ref>
      <ref id="B36-sensors-24-03980">
        <label>36.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Dora</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Subramanian</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Suresh</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Sundararajan</surname>
              <given-names>N.</given-names>
            </name>
          </person-group>
          <article-title>Development of a self-regulating evolving spiking neural network for classification problem</article-title>
          <source>Neurocomputing</source>
          <year>2016</year>
          <volume>171</volume>
          <fpage>1216</fpage>
          <lpage>1229</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neucom.2015.07.086</pub-id>
        </element-citation>
      </ref>
      <ref id="B37-sensors-24-03980">
        <label>37.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Pimentel</surname>
              <given-names>M.A.</given-names>
            </name>
            <name>
              <surname>Johnson</surname>
              <given-names>A.E.</given-names>
            </name>
            <name>
              <surname>Charlton</surname>
              <given-names>P.H.</given-names>
            </name>
            <name>
              <surname>Birrenkott</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Watkinson</surname>
              <given-names>P.J.</given-names>
            </name>
            <name>
              <surname>Tarassenko</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Clifton</surname>
              <given-names>D.A.</given-names>
            </name>
          </person-group>
          <article-title>Toward a robust estimation of respiratory rate from pulse oximeters</article-title>
          <source>IEEE Trans. Biomed. Eng.</source>
          <year>2016</year>
          <volume>64</volume>
          <fpage>1914</fpage>
          <lpage>1923</lpage>
          <pub-id pub-id-type="doi">10.1109/TBME.2016.2613124</pub-id>
        </element-citation>
      </ref>
      <ref id="B38-sensors-24-03980">
        <label>38.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Goldberger</surname>
              <given-names>A.L.</given-names>
            </name>
            <name>
              <surname>Amaral</surname>
              <given-names>L.A.</given-names>
            </name>
            <name>
              <surname>Glass</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Hausdorff</surname>
              <given-names>J.M.</given-names>
            </name>
            <name>
              <surname>Ivanov</surname>
              <given-names>P.C.</given-names>
            </name>
            <name>
              <surname>Mark</surname>
              <given-names>R.G.</given-names>
            </name>
            <name>
              <surname>Mietus</surname>
              <given-names>J.E.</given-names>
            </name>
            <name>
              <surname>Moody</surname>
              <given-names>G.B.</given-names>
            </name>
            <name>
              <surname>Peng</surname>
              <given-names>C.K.</given-names>
            </name>
            <name>
              <surname>Stanley</surname>
              <given-names>H.E.</given-names>
            </name>
          </person-group>
          <article-title>PhysioBank, PhysioToolkit, and PhysioNet: Components of a new research resource for complex physiologic signals</article-title>
          <source>Circulation</source>
          <year>2000</year>
          <volume>101</volume>
          <fpage>e215</fpage>
          <lpage>e220</lpage>
          <pub-id pub-id-type="doi">10.1161/01.CIR.101.23.e215</pub-id>
        </element-citation>
      </ref>
      <ref id="B39-sensors-24-03980">
        <label>39.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Lee</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Scott</surname>
              <given-names>D.J.</given-names>
            </name>
            <name>
              <surname>Villarroel</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Clifford</surname>
              <given-names>G.D.</given-names>
            </name>
            <name>
              <surname>Saeed</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Mark</surname>
              <given-names>R.G.</given-names>
            </name>
          </person-group>
          <article-title>Open-access MIMIC-II database for intensive care research</article-title>
          <source>Proceedings of the 2011 Annual International Conference of the IEEE Engineering in Medicine and Biology Society</source>
          <conf-loc>Boston, MA, USA</conf-loc>
          <conf-date>30 August–3 September 2011</conf-date>
          <fpage>8315</fpage>
          <lpage>8318</lpage>
        </element-citation>
      </ref>
      <ref id="B40-sensors-24-03980">
        <label>40.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Xiang</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Jiang</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Yu</surname>
              <given-names>L.</given-names>
            </name>
          </person-group>
          <article-title>Spiking VGG7: Deep convolutional spiking neural network with direct training for object recognition</article-title>
          <source>Electronics</source>
          <year>2022</year>
          <volume>11</volume>
          <elocation-id>2097</elocation-id>
          <pub-id pub-id-type="doi">10.3390/electronics11132097</pub-id>
        </element-citation>
      </ref>
      <ref id="B41-sensors-24-03980">
        <label>41.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Hodgkin</surname>
              <given-names>A.L.</given-names>
            </name>
            <name>
              <surname>Huxley</surname>
              <given-names>A.F.</given-names>
            </name>
          </person-group>
          <article-title>A quantitative description of membrane current and its application to conduction and excitation in nerve</article-title>
          <source>J. Physiol.</source>
          <year>1952</year>
          <volume>117</volume>
          <fpage>500</fpage>
          <pub-id pub-id-type="doi">10.1113/jphysiol.1952.sp004764</pub-id>
          <pub-id pub-id-type="pmid">12991237</pub-id>
        </element-citation>
      </ref>
      <ref id="B42-sensors-24-03980">
        <label>42.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Izhikevich</surname>
              <given-names>E.M.</given-names>
            </name>
          </person-group>
          <article-title>Simple model of spiking neurons</article-title>
          <source>IEEE Trans. Neural Netw.</source>
          <year>2003</year>
          <volume>14</volume>
          <fpage>1569</fpage>
          <lpage>1572</lpage>
          <pub-id pub-id-type="doi">10.1109/TNN.2003.820440</pub-id>
          <pub-id pub-id-type="pmid">18244602</pub-id>
        </element-citation>
      </ref>
      <ref id="B43-sensors-24-03980">
        <label>43.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Wu</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Deng</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>G.</given-names>
            </name>
            <name>
              <surname>Zhu</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Xie</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Shi</surname>
              <given-names>L.</given-names>
            </name>
          </person-group>
          <article-title>Direct training for spiking neural networks: Faster, larger, better</article-title>
          <source>Proceedings of the AAAI Conference on Artificial Intelligence</source>
          <conf-loc>Honolulu, HI, USA</conf-loc>
          <conf-date>27 January–1 February 2019</conf-date>
          <volume>Volume 33</volume>
          <fpage>1311</fpage>
          <lpage>1318</lpage>
        </element-citation>
      </ref>
      <ref id="B44-sensors-24-03980">
        <label>44.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Datta</surname>
              <given-names>G.</given-names>
            </name>
            <name>
              <surname>Beerel</surname>
              <given-names>P.A.</given-names>
            </name>
          </person-group>
          <article-title>Can deep neural networks be converted to ultra low-latency spiking neural networks?</article-title>
          <source>Proceedings of the 2022 Design, Automation &amp; Test in Europe Conference &amp; Exhibition</source>
          <conf-loc>Antwerp, Belgium</conf-loc>
          <conf-date>14–23 March 2022</conf-date>
          <fpage>718</fpage>
          <lpage>723</lpage>
        </element-citation>
      </ref>
      <ref id="B45-sensors-24-03980">
        <label>45.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Horowitz</surname>
              <given-names>M.</given-names>
            </name>
          </person-group>
          <article-title>1.1 computing’s energy problem (and what we can do about it)</article-title>
          <source>Proceedings of the 2014 IEEE International Solid-State Circuits Conference Digest of Technical Papers (ISSCC)</source>
          <conf-loc>San Francisco, CA, USA</conf-loc>
          <conf-date>9–13 February 2014</conf-date>
          <fpage>10</fpage>
          <lpage>14</lpage>
        </element-citation>
      </ref>
      <ref id="B46-sensors-24-03980">
        <label>46.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Hochreiter</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Schmidhuber</surname>
              <given-names>J.</given-names>
            </name>
          </person-group>
          <article-title>Long Short-Term Memory</article-title>
          <source>Neural Comput.</source>
          <year>1997</year>
          <volume>9</volume>
          <fpage>1735</fpage>
          <lpage>1780</lpage>
          <pub-id pub-id-type="doi">10.1162/neco.1997.9.8.1735</pub-id>
          <pub-id pub-id-type="pmid">9377276</pub-id>
        </element-citation>
      </ref>
      <ref id="B47-sensors-24-03980">
        <label>47.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Medsker</surname>
              <given-names>L.R.</given-names>
            </name>
            <name>
              <surname>Jain</surname>
              <given-names>L.</given-names>
            </name>
          </person-group>
          <article-title>Recurrent neural networks</article-title>
          <source>Des. Appl.</source>
          <year>2001</year>
          <volume>5</volume>
          <fpage>2</fpage>
        </element-citation>
      </ref>
      <ref id="B48-sensors-24-03980">
        <label>48.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Fang</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Yu</surname>
              <given-names>Z.</given-names>
            </name>
            <name>
              <surname>Chen</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Huang</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Masquelier</surname>
              <given-names>T.</given-names>
            </name>
            <name>
              <surname>Tian</surname>
              <given-names>Y.</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning in spiking neural networks</article-title>
          <source>Adv. Neural Inf. Process. Syst.</source>
          <year>2021</year>
          <volume>34</volume>
          <fpage>21056</fpage>
          <lpage>21069</lpage>
        </element-citation>
      </ref>
      <ref id="B49-sensors-24-03980">
        <label>49.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Duan</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Ding</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Chen</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Yu</surname>
              <given-names>Z.</given-names>
            </name>
            <name>
              <surname>Huang</surname>
              <given-names>T.</given-names>
            </name>
          </person-group>
          <article-title>Temporal effective batch normalization in spiking neural networks</article-title>
          <source>Adv. Neural Inf. Process. Syst.</source>
          <year>2022</year>
          <volume>35</volume>
          <fpage>34377</fpage>
          <lpage>34390</lpage>
        </element-citation>
      </ref>
      <ref id="B50-sensors-24-03980">
        <label>50.</label>
        <element-citation publication-type="confproc">
          <person-group person-group-type="author">
            <name>
              <surname>Kim</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Park</surname>
              <given-names>H.</given-names>
            </name>
            <name>
              <surname>Venkatesha</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Panda</surname>
              <given-names>P.</given-names>
            </name>
          </person-group>
          <article-title>Neural architecture search for spiking neural networks</article-title>
          <source>Proceedings of the European Conference on Computer Vision</source>
          <conf-loc>Tel Aviv, Israel</conf-loc>
          <conf-date>23–27 October 2022</conf-date>
          <publisher-name>Springer</publisher-name>
          <publisher-loc>Berlin/Heidelberg, Germany</publisher-loc>
          <year>2022</year>
          <fpage>36</fpage>
          <lpage>56</lpage>
        </element-citation>
      </ref>
      <ref id="B51-sensors-24-03980">
        <label>51.</label>
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Rathi</surname>
              <given-names>N.</given-names>
            </name>
            <name>
              <surname>Roy</surname>
              <given-names>K.</given-names>
            </name>
          </person-group>
          <article-title>DIET-SNN: Direct input encoding with leakage and threshold optimization in deep spiking neural networks</article-title>
          <source>arXiv</source>
          <year>2020</year>
          <pub-id pub-id-type="arxiv">2008.03658</pub-id>
        </element-citation>
      </ref>
    </ref-list>
    <sec sec-type="display-objects">
      <title>Figures and Tables</title>
      <fig id="sensors-24-03980-f001" position="float">
        <label>Figure 1</label>
        <caption>
          <p>The functional difference between DNNs and SNNs.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g001.tif"/>
      </fig>
      <fig id="sensors-24-03980-f002" position="float">
        <label>Figure 2</label>
        <caption>
          <p>Difference in the operation of the hard-reset and soft-reset mechanisms in an IF neuron.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g002.tif"/>
      </fig>
      <fig id="sensors-24-03980-f003" position="float">
        <label>Figure 3</label>
        <caption>
          <p>Surrogate functions for approximating the Heaviside step function. (<bold>a</bold>) Its original functions corresponding to the parameter <inline-formula><mml:math id="mm34"><mml:semantics><mml:mi>α</mml:mi></mml:semantics></mml:math></inline-formula> in Equation (<xref ref-type="disp-formula" rid="FD5-sensors-24-03980">5</xref>) and (<bold>b</bold>) its derivative function.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g003.tif"/>
      </fig>
      <fig id="sensors-24-03980-f004" position="float">
        <label>Figure 4</label>
        <caption>
          <p>Schematic diagram of the proposed network structure. The proposed network comprises a spike encoding layer (blue box), spike hidden layer (green box), and spike decoding layer (purple box). Note that the spike hidden layer was employed iteratively twice.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g004.tif"/>
      </fig>
      <fig id="sensors-24-03980-f005" position="float">
        <label>Figure 5</label>
        <caption>
          <p>Train and validation losses for CNN-LSTM, CNN-RNN, VGG-8, and Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>] when the window size is (<bold>a</bold>) 16, (<bold>b</bold>) 32, and (<bold>c</bold>) 64. The standard deviations are represented with shades.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g005.tif"/>
      </fig>
      <fig id="sensors-24-03980-f006" position="float">
        <label>Figure 6</label>
        <caption>
          <p>Bland–Altman graphs for the best performing models (<bold>a</bold>,<bold>b</bold>): Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>], (<bold>c</bold>): CNN-LSTM) and the proposed model (<bold>d</bold>–<bold>f</bold>) with window sizes of 16, 32, and 64. Note that the interval between the lower limit of agreement and the upper limit of agreement represents a 95% confidence interval.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g006.tif"/>
      </fig>
      <fig id="sensors-24-03980-f007" position="float">
        <label>Figure 7</label>
        <caption>
          <p>The raw PPG signals, true respiratory signals, and its encoded spike patterns from the PPG signal within 32-s window are visualized across (<bold>a</bold>) a clean PPG signal and (<bold>b</bold>), (<bold>c</bold>) noisy PPG signals. Note that the dotted red boxes depict the error regions.</p>
        </caption>
        <graphic xmlns:xlink="http://www.w3.org/1999/xlink" xlink:href="sensors-24-03980-g007.tif"/>
      </fig>
      <table-wrap id="sensors-24-03980-t001" position="float">
        <object-id pub-id-type="pii">sensors-24-03980-t001_Table 1</object-id>
        <label>Table 1</label>
        <caption>
          <p>Components of the proposed and benchmark models.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Model Components</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Proposed Model</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">CNN-LSTM</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">CNN-RNN</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">VGG-8</th>
              <th align="center" valign="middle" style="border-bottom:solid thin;border-top:solid thin">Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>]</th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle">CNN Layers</td>
              <td align="center" valign="middle">3</td>
              <td align="center" valign="middle">3</td>
              <td align="center" valign="middle">3</td>
              <td align="center" valign="middle">5</td>
              <td align="center" valign="middle">5</td>
            </tr>
            <tr>
              <td align="center" valign="middle">LSTM Layers</td>
              <td align="center" valign="middle">-</td>
              <td align="center" valign="middle">1</td>
              <td align="center" valign="middle">-</td>
              <td align="center" valign="middle">-</td>
              <td align="center" valign="middle">-</td>
            </tr>
            <tr>
              <td align="center" valign="middle">RNN Layers</td>
              <td align="center" valign="middle">-</td>
              <td align="center" valign="middle">-</td>
              <td align="center" valign="middle">1</td>
              <td align="center" valign="middle">-</td>
              <td align="center" valign="middle">-</td>
            </tr>
            <tr>
              <td align="center" valign="middle">Dense Layers</td>
              <td align="center" valign="middle">2</td>
              <td align="center" valign="middle">1</td>
              <td align="center" valign="middle">1</td>
              <td align="center" valign="middle">3</td>
              <td align="center" valign="middle">3</td>
            </tr>
            <tr>
              <td align="center" valign="middle">CNN Filter Size</td>
              <td align="center" valign="middle">20/8/8</td>
              <td align="center" valign="middle">10/5/5</td>
              <td align="center" valign="middle">10/5/5</td>
              <td align="center" valign="middle">3/3/3/3/3</td>
              <td align="center" valign="middle">16/32/64/3/3</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Activation Functions</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">IF/RLIF</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Leaky-ReLU</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Leaky-ReLU</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Leaky-ReLU</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">Leaky-ReLU</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap id="sensors-24-03980-t002" position="float">
        <object-id pub-id-type="pii">sensors-24-03980-t002_Table 2</object-id>
        <label>Table 2</label>
        <caption>
          <p>PCC and MAE performances corresponding to the different window sizes of PPG segment and time steps.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th rowspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Time Steps (T)</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 16</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 32</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 64</th>
            </tr>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                PCC
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                MAE (bpm)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                PCC
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                MAE (bpm)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                PCC
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                MAE (bpm)
              </th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle">T = 4</td>
              <td align="center" valign="middle">0.4980 ± 0.0287</td>
              <td align="center" valign="middle">1.5247 ± 0.0332</td>
              <td align="center" valign="middle">0.6074 ± 0.0064</td>
              <td align="center" valign="middle">1.3642 ± 0.0129</td>
              <td align="center" valign="middle">0.6219 ± 0.0111</td>
              <td align="center" valign="middle">1.3153 ± 0.0134</td>
            </tr>
            <tr>
              <td align="center" valign="middle">T = 8</td>
              <td align="center" valign="middle">
                <bold>0.5695 ± 0.0319</bold>
              </td>
              <td align="center" valign="middle">
                <bold>1.3710 ± 0.0481</bold>
              </td>
              <td align="center" valign="middle">
                <bold>0.6360 ± 0.0070</bold>
              </td>
              <td align="center" valign="middle">
                <bold>1.2234 ± 0.0372</bold>
              </td>
              <td align="center" valign="middle">0.6615 ± 0.0473</td>
              <td align="center" valign="middle">
                <bold>1.1518 ± 0.0697</bold>
              </td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">T = 16</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">0.5383 ± 0.0343</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">1.4671 ± 0.0157</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">0.5447 ± 0.0117</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">1.3646 ± 0.0352</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>0.6630 ± 0.0199</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">1.2204 ± 0.0493</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap id="sensors-24-03980-t003" position="float">
        <object-id pub-id-type="pii">sensors-24-03980-t003_Table 3</object-id>
        <label>Table 3</label>
        <caption>
          <p>Testing results of the proposed model compared with the other DNN models.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th rowspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Model</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 16</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 32</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 64</th>
            </tr>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                PCC
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                MAE (bpm)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                PCC
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                MAE (bpm)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                PCC
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                MAE (bpm)
              </th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle">CNN-LSTM</td>
              <td align="center" valign="middle">0.5926 ± 0.0355</td>
              <td align="center" valign="middle">1.3681 ± 0.0685</td>
              <td align="center" valign="middle">0.6864 ± 0.0342</td>
              <td align="center" valign="middle">1.1169 ± 0.0705</td>
              <td align="center" valign="middle">0.7077 ± 0.0245</td>
              <td align="center" valign="middle">
                <bold>1.1116 ± 0.0343</bold>
              </td>
            </tr>
            <tr>
              <td align="center" valign="middle">CNN-RNN</td>
              <td align="center" valign="middle">0.5233 ± 0.0113</td>
              <td align="center" valign="middle">1.4757 ± 0.0348</td>
              <td align="center" valign="middle">
                <bold>0.6980 ± 0.0270</bold>
              </td>
              <td align="center" valign="middle">1.1605 ± 0.0675</td>
              <td align="center" valign="middle">
                <bold>0.7489 ± 0.0298</bold>
              </td>
              <td align="center" valign="middle">1.1537 ± 0.0448</td>
            </tr>
            <tr>
              <td align="center" valign="middle">VGG-8</td>
              <td align="center" valign="middle">0.4577 ± 0.0329</td>
              <td align="center" valign="middle">1.4721 ± 0.1795</td>
              <td align="center" valign="middle">0.5305 ± 0.0312</td>
              <td align="center" valign="middle">1.4434 ± 0.0705</td>
              <td align="center" valign="middle">0.5007 ± 0.1199</td>
              <td align="center" valign="middle">1.4053 ± 0.0964</td>
            </tr>
            <tr>
              <td align="center" valign="middle">Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>]</td>
              <td align="center" valign="middle">
                <bold>0.5945 ± 0.0142</bold>
              </td>
              <td align="center" valign="middle">
                <bold>1.3460 ± 0.0128</bold>
              </td>
              <td align="center" valign="middle">0.6705 ± 0.0045</td>
              <td align="center" valign="middle">
                <bold>1.1121 ± 0.0108</bold>
              </td>
              <td align="center" valign="middle">0.6643 ± 0.0103</td>
              <td align="center" valign="middle">1.1321 ± 0.0150</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Proposed model</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">0.5695 ± 0.0319</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">1.3710 ± 0.0481</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">0.6360 ± 0.0070</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">1.2234 ± 0.0372</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">0.6615 ± 0.0473</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">1.1518 ± 0.0697</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
      <table-wrap id="sensors-24-03980-t004" position="float">
        <object-id pub-id-type="pii">sensors-24-03980-t004_Table 4</object-id>
        <label>Table 4</label>
        <caption>
          <p>FLOPs and energy cost results of the proposed model compared with the other DNN models.</p>
        </caption>
        <table>
          <thead>
            <tr>
              <th rowspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Model</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 16</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 32</th>
              <th colspan="2" align="center" valign="middle" style="border-top:solid thin;border-bottom:solid thin">Window Size (s) = 64</th>
            </tr>
            <tr>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                FLOPs (M)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                Energy Cost (μJ)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                FLOPs (M)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                Energy Cost (μJ)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                FLOPs (M)
              </th>
              <th align="center" valign="middle" style="border-bottom:solid thin">
                Energy Cost (μJ)
              </th>
            </tr>
          </thead>
          <tbody>
            <tr>
              <td align="center" valign="middle">CNN-LSTM</td>
              <td align="center" valign="middle">16.44</td>
              <td align="center" valign="middle">0.5260</td>
              <td align="center" valign="middle">44.38</td>
              <td align="center" valign="middle">1.4200</td>
              <td align="center" valign="middle">84.75</td>
              <td align="center" valign="middle">2.7120</td>
            </tr>
            <tr>
              <td align="center" valign="middle">CNN-RNN</td>
              <td align="center" valign="middle">31.99</td>
              <td align="center" valign="middle">1.0230</td>
              <td align="center" valign="middle">35.04</td>
              <td align="center" valign="middle">1.1210</td>
              <td align="center" valign="middle">69.26</td>
              <td align="center" valign="middle">2.2160</td>
            </tr>
            <tr>
              <td align="center" valign="middle">VGG-8</td>
              <td align="center" valign="middle">16.01</td>
              <td align="center" valign="middle">0.5523</td>
              <td align="center" valign="middle">28.19</td>
              <td align="center" valign="middle">1.0760</td>
              <td align="center" valign="middle">55.99</td>
              <td align="center" valign="middle">2.1250</td>
            </tr>
            <tr>
              <td align="center" valign="middle">Osathitporn et al. [<xref ref-type="bibr" rid="B19-sensors-24-03980">19</xref>]</td>
              <td align="center" valign="middle">6.98</td>
              <td align="center" valign="middle">0.2233</td>
              <td align="center" valign="middle">13.44</td>
              <td align="center" valign="middle">0.4301</td>
              <td align="center" valign="middle">26.34</td>
              <td align="center" valign="middle">0.8428</td>
            </tr>
            <tr>
              <td align="center" valign="middle" style="border-bottom:solid thin">Proposed model</td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>6.59</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>0.0120</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>12.21</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>0.0230</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>21.32</bold>
              </td>
              <td align="center" valign="middle" style="border-bottom:solid thin">
                <bold>0.0420</bold>
              </td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <fn-group>
      <fn>
        <p><bold>Disclaimer/Publisher’s Note:</bold> The statements, opinions and data contained in all publications are solely those of the individual author(s) and contributor(s) and not of MDPI and/or the editor(s). MDPI and/or the editor(s) disclaim responsibility for any injury to people or property resulting from any ideas, methods, instructions or products referred to in the content.</p>
      </fn>
    </fn-group>
  </back>
</article>
