iwm: add support for firmware paging

classic Classic list List threaded Threaded
6 messages Options
Reply | Threaded
Open this post in threaded view
|

iwm: add support for firmware paging

Stefan Sperling-5
Newer iwm(4) firmware versions will require paging to host DRAM.
This diff implements support for this. It was written by Imre Vadasz in 2017.

I would like to get review by people with experience in the kernel memory
subsystem and DMA, to check that this diff isn't doing something stupid.

As it is, the diff can only be tested for regressions because our current
firmware versions do not use paging. And the diff alone is not sufficient to
make newer firmware versions work with our driver, just one step on the way.
The diff seems to work OK with -31 and -34 8265 firmware in combination with
additional patches I have in my tree.
It applies on top of -current which just received a couple of commits to iwm.
Make sure your tree is up-to-date before applying it.

diff b8720deb3d8c2ae7a0a629e71606d48da4c53669 6bfc87e6a1a13d601330e23b3cee057701e0a418
blob - a14efb48f10b28d578331ce295178abdf58862d7
blob + 8b4ab3a2132ed1e4947151a309a9e78b90e31009
--- sys/dev/pci/if_iwm.c
+++ sys/dev/pci/if_iwm.c
@@ -457,6 +457,9 @@ int iwm_sf_config(struct iwm_softc *, int);
 int iwm_send_bt_init_conf(struct iwm_softc *);
 int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
 void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
+void iwm_free_fw_paging(struct iwm_softc *);
+int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
+int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
 int iwm_init_hw(struct iwm_softc *);
 int iwm_init(struct ifnet *);
 void iwm_start(struct ifnet *);
@@ -584,6 +587,8 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode
  struct iwm_ucode_tlv tlv;
  uint32_t tlv_type;
  uint8_t *data;
+ uint32_t usniffer_img;
+ uint32_t paging_mem_size;
  int err;
  size_t len;
 
@@ -787,6 +792,37 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode
  goto parse_out;
  break;
 
+ case IWM_UCODE_TLV_PAGING:
+ if (tlv_len != sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
+
+ DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
+    DEVNAME(sc), paging_mem_size));
+ if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
+ printf("%s: Driver only supports up to %u"
+    " bytes for paging image (%u requested)\n",
+    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
+    paging_mem_size);
+ err = EINVAL;
+ goto out;
+ }
+ if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
+ printf("%s: Paging: image isn't multiple of %u\n",
+    DEVNAME(sc), IWM_FW_PAGING_SIZE);
+ err = EINVAL;
+ goto out;
+ }
+
+ fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
+    paging_mem_size;
+ usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
+ fw->fw_sects[usniffer_img].paging_mem_size =
+    paging_mem_size;
+ break;
+
  case IWM_UCODE_TLV_N_SCAN_CHANNELS:
  if (tlv_len != sizeof(uint32_t)) {
  err = EINVAL;
@@ -3219,6 +3255,7 @@ iwm_load_ucode_wait_alive(struct iwm_softc *sc,
  enum iwm_ucode_type ucode_type)
 {
  enum iwm_ucode_type old_type = sc->sc_uc_current;
+ struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
  int err;
 
  err = iwm_read_firmware(sc, ucode_type);
@@ -3237,7 +3274,33 @@ iwm_load_ucode_wait_alive(struct iwm_softc *sc,
  return err;
  }
 
- return iwm_post_alive(sc);
+ err = iwm_post_alive(sc);
+ if (err)
+ return err;
+
+ /*
+ * configure and operate fw paging mechanism.
+ * driver configures the paging flow only once, CPU2 paging image
+ * included in the IWM_UCODE_INIT image.
+ */
+ if (fw->paging_mem_size) {
+ err = iwm_save_fw_paging(sc, fw);
+ if (err) {
+ printf("%s: failed to save the FW paging image\n",
+    DEVNAME(sc));
+ return err;
+ }
+
+ err = iwm_send_paging_cmd(sc, fw);
+ if (err) {
+ printf("%s: failed to send the paging cmd\n",
+    DEVNAME(sc));
+ iwm_free_fw_paging(sc);
+ return err;
+ }
+ }
+
+ return 0;
 }
 
 int
@@ -6348,7 +6411,228 @@ iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backo
  iwm_send_cmd(sc, &cmd);
 }
 
+void
+iwm_free_fw_paging(struct iwm_softc *sc)
+{
+ int i;
+
+ if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
+ return;
+
+ for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
+ iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
+ }
+
+ memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
+}
+
 int
+iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
+{
+ int sec_idx, idx;
+ uint32_t offset = 0;
+
+ /*
+ * find where is the paging image start point:
+ * if CPU2 exist and it's in paging format, then the image looks like:
+ * CPU1 sections (2 or more)
+ * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
+ * CPU2 sections (not paged)
+ * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
+ * non paged to CPU2 paging sec
+ * CPU2 paging CSS
+ * CPU2 paging image (including instruction and data)
+ */
+ for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
+ if (image->fw_sect[sec_idx].fws_devoff ==
+    IWM_PAGING_SEPARATOR_SECTION) {
+ sec_idx++;
+ break;
+ }
+ }
+
+ /*
+ * If paging is enabled there should be at least 2 more sections left
+ * (one for CSS and one for Paging data)
+ */
+ if (sec_idx >= nitems(image->fw_sect) - 1) {
+ printf("%s: Paging: Missing CSS and/or paging sections\n",
+    DEVNAME(sc));
+ iwm_free_fw_paging(sc);
+ return EINVAL;
+ }
+
+ /* copy the CSS block to the dram */
+ DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
+    DEVNAME(sc), sec_idx));
+
+ memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
+    image->fw_sect[sec_idx].fws_data,
+    sc->fw_paging_db[0].fw_paging_size);
+
+ DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
+    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
+
+ sec_idx++;
+
+ /*
+ * copy the paging blocks to the dram
+ * loop index start from 1 since that CSS block already copied to dram
+ * and CSS index is 0.
+ * loop stop at num_of_paging_blk since that last block is not full.
+ */
+ for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
+ memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
+    (const char *)image->fw_sect[sec_idx].fws_data + offset,
+    sc->fw_paging_db[idx].fw_paging_size);
+
+ DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
+    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
+
+ offset += sc->fw_paging_db[idx].fw_paging_size;
+ }
+
+ /* copy the last paging block */
+ if (sc->num_of_pages_in_last_blk > 0) {
+ memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
+    (const char *)image->fw_sect[sec_idx].fws_data + offset,
+    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
+
+ DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
+    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
+ }
+
+ return 0;
+}
+
+int
+iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
+{
+ int blk_idx = 0;
+ int error, num_of_pages;
+
+ if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
+ int i;
+ /* Device got reset, and we setup firmware paging again */
+ bus_dmamap_sync(sc->sc_dmat,
+    sc->fw_paging_db[0].fw_paging_block.map,
+    0, IWM_FW_PAGING_SIZE,
+    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
+ for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
+ bus_dmamap_sync(sc->sc_dmat,
+    sc->fw_paging_db[i].fw_paging_block.map,
+    0, IWM_PAGING_BLOCK_SIZE,
+    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
+ }
+ return 0;
+ }
+
+ /* ensure IWM_BLOCK_2_EXP_SIZE is power of 2 of IWM_PAGING_BLOCK_SIZE */
+#if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
+#error IWM_BLOCK_2_EXP_SIZE must be power of 2 of IWM_PAGING_BLOCK_SIZE
+#endif
+
+ num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
+ sc->num_of_paging_blk =
+    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
+
+ sc->num_of_pages_in_last_blk =
+ num_of_pages -
+ IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
+
+ DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
+    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
+    sc->num_of_paging_blk,
+    sc->num_of_pages_in_last_blk));
+
+ /* allocate block of 4Kbytes for paging CSS */
+ error = iwm_dma_contig_alloc(sc->sc_dmat,
+    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
+    4096);
+ if (error) {
+ /* free all the previous pages since we failed */
+ iwm_free_fw_paging(sc);
+ return ENOMEM;
+ }
+
+ sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
+
+ DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
+    DEVNAME(sc)));
+
+ /*
+ * allocate blocks in dram.
+ * since that CSS allocated in fw_paging_db[0] loop start from index 1
+ */
+ for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
+ /* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
+ /* XXX Use iwm_dma_contig_alloc for allocating */
+ error = iwm_dma_contig_alloc(sc->sc_dmat,
+     &sc->fw_paging_db[blk_idx].fw_paging_block,
+    IWM_PAGING_BLOCK_SIZE, 4096);
+ if (error) {
+ /* free all the previous pages since we failed */
+ iwm_free_fw_paging(sc);
+ return ENOMEM;
+ }
+
+ sc->fw_paging_db[blk_idx].fw_paging_size =
+    IWM_PAGING_BLOCK_SIZE;
+
+ DPRINTF((
+    "%s: Paging: allocated 32K bytes for firmware paging.\n",
+    DEVNAME(sc)));
+ }
+
+ return 0;
+}
+
+int
+iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
+{
+ int ret;
+
+ ret = iwm_alloc_fw_paging_mem(sc, fw);
+ if (ret)
+ return ret;
+
+ return iwm_fill_paging_mem(sc, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+int
+iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
+{
+ int blk_idx;
+ uint32_t dev_phy_addr;
+ struct iwm_fw_paging_cmd fw_paging_cmd = {
+ .flags =
+ htole32(IWM_PAGING_CMD_IS_SECURED |
+ IWM_PAGING_CMD_IS_ENABLED |
+ (sc->num_of_pages_in_last_blk <<
+ IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+ .block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
+ .block_num = htole32(sc->num_of_paging_blk),
+ };
+
+ /* loop for for all paging blocks + CSS block */
+ for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
+ dev_phy_addr = htole32(
+    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
+    IWM_PAGE_2_EXP_SIZE);
+ fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+ bus_dmamap_sync(sc->sc_dmat,
+    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
+    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
+    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ }
+
+ return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
+       IWM_LONG_GROUP, 0),
+    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+int
 iwm_init_hw(struct iwm_softc *sc)
 {
  struct ieee80211com *ic = &sc->sc_ic;
@@ -7188,6 +7472,8 @@ iwm_notif_intr(struct iwm_softc *sc)
  case IWM_REMOVE_STA:
  case IWM_TXPATH_FLUSH:
  case IWM_LQ_CMD:
+ case IWM_WIDE_ID(IWM_LONG_GROUP,
+ IWM_FW_PAGING_BLOCK_CMD):
  case IWM_BT_CONFIG:
  case IWM_REPLY_THERMAL_MNG_BACKOFF:
  case IWM_NVM_ACCESS_CMD:
blob - 8919f30dc6b86e23fb9433fbe2998394c17eb7b6
blob + 1e6308bbd5b4958427d4a354fb4f339688bbc8d1
--- sys/dev/pci/if_iwmreg.h
+++ sys/dev/pci/if_iwmreg.h
@@ -1737,6 +1737,9 @@ struct iwm_agn_scd_bc_tbl {
 #define IWM_CALIBRATION_COMPLETE_NOTIFICATION 0x67
 #define IWM_RADIO_VERSION_NOTIFICATION 0x68
 
+/* paging block to FW cpu2 */
+#define IWM_FW_PAGING_BLOCK_CMD 0x4f
+
 /* Scan offload */
 #define IWM_SCAN_OFFLOAD_REQUEST_CMD 0x51
 #define IWM_SCAN_OFFLOAD_ABORT_CMD 0x52
@@ -2099,6 +2102,57 @@ struct iwm_nvm_access_cmd {
  uint16_t length;
  uint8_t data[];
 } __packed; /* IWM_NVM_ACCESS_CMD_API_S_VER_2 */
+
+/*
+ * Block paging calculations
+ */
+#define IWM_PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define IWM_FW_PAGING_SIZE (1 << IWM_PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define IWM_PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define IWM_NUM_OF_PAGE_PER_GROUP (1 << IWM_PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define IWM_PAGING_BLOCK_SIZE (IWM_NUM_OF_PAGE_PER_GROUP * IWM_FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define IWM_BLOCK_2_EXP_SIZE (IWM_PAGE_2_EXP_SIZE + IWM_PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define IWM_BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define IWM_NUM_OF_BLOCK_PER_IMAGE (1 << IWM_BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define IWM_MAX_PAGING_IMAGE_SIZE (IWM_NUM_OF_BLOCK_PER_IMAGE * IWM_PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define IWM_PAGING_ADDR_SIG 0xAA000000
+
+#define IWM_PAGING_CMD_IS_SECURED (1 << 9)
+#define IWM_PAGING_CMD_IS_ENABLED (1 << 8)
+#define IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
+#define IWM_PAGING_TLV_SECURE_MASK 1
+
+#define IWM_NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwm_fw_paging_cmd - paging layout
+ *
+ * (IWM_FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+*/
+struct iwm_fw_paging_cmd {
+ uint32_t flags;
+ uint32_t block_size;
+ uint32_t block_num;
+ uint32_t device_phy_addr[IWM_NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* IWM_FW_PAGING_BLOCK_CMD_API_S_VER_1 */
 
 /**
  * struct iwm_nvm_access_resp_ver2 - response to IWM_NVM_ACCESS_CMD
blob - 0eaf07bed8dfc0ec674e55b3da211ef88ad93dfe
blob + edf48df74c872bdeab3a08c65bf2434e4f297509
--- sys/dev/pci/if_iwmvar.h
+++ sys/dev/pci/if_iwmvar.h
@@ -173,6 +173,7 @@ struct iwm_fw_info {
  } fw_sect[IWM_UCODE_SECT_MAX];
  size_t fw_totlen;
  int fw_count;
+ uint32_t paging_mem_size;
  } fw_sects[IWM_UCODE_TYPE_MAX];
 };
 
@@ -237,6 +238,16 @@ struct iwm_dma_info {
  bus_size_t size;
 };
 
+/**
+ * struct iwm_fw_paging
+ * @fw_paging_block: dma memory info
+ * @fw_paging_size: page size
+ */
+struct iwm_fw_paging {
+ struct iwm_dma_info fw_paging_block;
+ uint32_t fw_paging_size;
+};
+
 #define IWM_TX_RING_COUNT 256
 #define IWM_TX_RING_LOMARK 192
 #define IWM_TX_RING_HIMARK 224
@@ -486,6 +497,14 @@ struct iwm_softc {
 
  int host_interrupt_operation_mode;
  int sc_ltr_enabled;
+
+ /*
+ * Paging parameters - All of the parameters should be set by the
+ * opmode when paging is enabled
+ */
+ struct iwm_fw_paging fw_paging_db[IWM_NUM_OF_FW_PAGING_BLOCKS];
+ uint16_t num_of_paging_blk;
+ uint16_t num_of_pages_in_last_blk;
 
 #if NBPFILTER > 0
  caddr_t sc_drvbpf;

Reply | Threaded
Open this post in threaded view
|

Re: iwm: add support for firmware paging

Mark Kettenis
> Date: Mon, 28 Oct 2019 19:28:29 +0100
> From: Stefan Sperling <[hidden email]>
>
> Newer iwm(4) firmware versions will require paging to host DRAM.
> This diff implements support for this. It was written by Imre Vadasz in 2017.
>
> I would like to get review by people with experience in the kernel memory
> subsystem and DMA, to check that this diff isn't doing something stupid.

I have trouble understanding what the bus_dmamap_sync() calls in
iwm_alloc_fw_paging_mem() are supposed to achieve.

> As it is, the diff can only be tested for regressions because our current
> firmware versions do not use paging. And the diff alone is not sufficient to
> make newer firmware versions work with our driver, just one step on the way.
> The diff seems to work OK with -31 and -34 8265 firmware in combination with
> additional patches I have in my tree.
> It applies on top of -current which just received a couple of commits to iwm.
> Make sure your tree is up-to-date before applying it.
>
> diff b8720deb3d8c2ae7a0a629e71606d48da4c53669 6bfc87e6a1a13d601330e23b3cee057701e0a418
> blob - a14efb48f10b28d578331ce295178abdf58862d7
> blob + 8b4ab3a2132ed1e4947151a309a9e78b90e31009
> --- sys/dev/pci/if_iwm.c
> +++ sys/dev/pci/if_iwm.c
> @@ -457,6 +457,9 @@ int iwm_sf_config(struct iwm_softc *, int);
>  int iwm_send_bt_init_conf(struct iwm_softc *);
>  int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
>  void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
> +void iwm_free_fw_paging(struct iwm_softc *);
> +int iwm_save_fw_paging(struct iwm_softc *, const struct iwm_fw_sects *);
> +int iwm_send_paging_cmd(struct iwm_softc *, const struct iwm_fw_sects *);
>  int iwm_init_hw(struct iwm_softc *);
>  int iwm_init(struct ifnet *);
>  void iwm_start(struct ifnet *);
> @@ -584,6 +587,8 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode
>   struct iwm_ucode_tlv tlv;
>   uint32_t tlv_type;
>   uint8_t *data;
> + uint32_t usniffer_img;
> + uint32_t paging_mem_size;
>   int err;
>   size_t len;
>  
> @@ -787,6 +792,37 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode
>   goto parse_out;
>   break;
>  
> + case IWM_UCODE_TLV_PAGING:
> + if (tlv_len != sizeof(uint32_t)) {
> + err = EINVAL;
> + goto parse_out;
> + }
> + paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
> +
> + DPRINTF(("%s: Paging: paging enabled (size = %u bytes)\n",
> +    DEVNAME(sc), paging_mem_size));
> + if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
> + printf("%s: Driver only supports up to %u"
> +    " bytes for paging image (%u requested)\n",
> +    DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
> +    paging_mem_size);
> + err = EINVAL;
> + goto out;
> + }
> + if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
> + printf("%s: Paging: image isn't multiple of %u\n",
> +    DEVNAME(sc), IWM_FW_PAGING_SIZE);
> + err = EINVAL;
> + goto out;
> + }
> +
> + fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
> +    paging_mem_size;
> + usniffer_img = IWM_UCODE_TYPE_REGULAR_USNIFFER;
> + fw->fw_sects[usniffer_img].paging_mem_size =
> +    paging_mem_size;
> + break;
> +
>   case IWM_UCODE_TLV_N_SCAN_CHANNELS:
>   if (tlv_len != sizeof(uint32_t)) {
>   err = EINVAL;
> @@ -3219,6 +3255,7 @@ iwm_load_ucode_wait_alive(struct iwm_softc *sc,
>   enum iwm_ucode_type ucode_type)
>  {
>   enum iwm_ucode_type old_type = sc->sc_uc_current;
> + struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
>   int err;
>  
>   err = iwm_read_firmware(sc, ucode_type);
> @@ -3237,7 +3274,33 @@ iwm_load_ucode_wait_alive(struct iwm_softc *sc,
>   return err;
>   }
>  
> - return iwm_post_alive(sc);
> + err = iwm_post_alive(sc);
> + if (err)
> + return err;
> +
> + /*
> + * configure and operate fw paging mechanism.
> + * driver configures the paging flow only once, CPU2 paging image
> + * included in the IWM_UCODE_INIT image.
> + */
> + if (fw->paging_mem_size) {
> + err = iwm_save_fw_paging(sc, fw);
> + if (err) {
> + printf("%s: failed to save the FW paging image\n",
> +    DEVNAME(sc));
> + return err;
> + }
> +
> + err = iwm_send_paging_cmd(sc, fw);
> + if (err) {
> + printf("%s: failed to send the paging cmd\n",
> +    DEVNAME(sc));
> + iwm_free_fw_paging(sc);
> + return err;
> + }
> + }
> +
> + return 0;
>  }
>  
>  int
> @@ -6348,7 +6411,228 @@ iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backo
>   iwm_send_cmd(sc, &cmd);
>  }
>  
> +void
> +iwm_free_fw_paging(struct iwm_softc *sc)
> +{
> + int i;
> +
> + if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
> + return;
> +
> + for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
> + iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
> + }
> +
> + memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
> +}
> +
>  int
> +iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
> +{
> + int sec_idx, idx;
> + uint32_t offset = 0;
> +
> + /*
> + * find where is the paging image start point:
> + * if CPU2 exist and it's in paging format, then the image looks like:
> + * CPU1 sections (2 or more)
> + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
> + * CPU2 sections (not paged)
> + * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
> + * non paged to CPU2 paging sec
> + * CPU2 paging CSS
> + * CPU2 paging image (including instruction and data)
> + */
> + for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
> + if (image->fw_sect[sec_idx].fws_devoff ==
> +    IWM_PAGING_SEPARATOR_SECTION) {
> + sec_idx++;
> + break;
> + }
> + }
> +
> + /*
> + * If paging is enabled there should be at least 2 more sections left
> + * (one for CSS and one for Paging data)
> + */
> + if (sec_idx >= nitems(image->fw_sect) - 1) {
> + printf("%s: Paging: Missing CSS and/or paging sections\n",
> +    DEVNAME(sc));
> + iwm_free_fw_paging(sc);
> + return EINVAL;
> + }
> +
> + /* copy the CSS block to the dram */
> + DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n",
> +    DEVNAME(sc), sec_idx));
> +
> + memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
> +    image->fw_sect[sec_idx].fws_data,
> +    sc->fw_paging_db[0].fw_paging_size);
> +
> + DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
> +    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
> +
> + sec_idx++;
> +
> + /*
> + * copy the paging blocks to the dram
> + * loop index start from 1 since that CSS block already copied to dram
> + * and CSS index is 0.
> + * loop stop at num_of_paging_blk since that last block is not full.
> + */
> + for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
> + memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
> +    (const char *)image->fw_sect[sec_idx].fws_data + offset,
> +    sc->fw_paging_db[idx].fw_paging_size);
> +
> + DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
> +    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
> +
> + offset += sc->fw_paging_db[idx].fw_paging_size;
> + }
> +
> + /* copy the last paging block */
> + if (sc->num_of_pages_in_last_blk > 0) {
> + memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
> +    (const char *)image->fw_sect[sec_idx].fws_data + offset,
> +    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
> +
> + DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
> +    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
> + }
> +
> + return 0;
> +}
> +
> +int
> +iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
> +{
> + int blk_idx = 0;
> + int error, num_of_pages;
> +
> + if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
> + int i;
> + /* Device got reset, and we setup firmware paging again */
> + bus_dmamap_sync(sc->sc_dmat,
> +    sc->fw_paging_db[0].fw_paging_block.map,
> +    0, IWM_FW_PAGING_SIZE,
> +    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
> + for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
> + bus_dmamap_sync(sc->sc_dmat,
> +    sc->fw_paging_db[i].fw_paging_block.map,
> +    0, IWM_PAGING_BLOCK_SIZE,
> +    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
> + }
> + return 0;
> + }
> +
> + /* ensure IWM_BLOCK_2_EXP_SIZE is power of 2 of IWM_PAGING_BLOCK_SIZE */
> +#if (1 << IWM_BLOCK_2_EXP_SIZE) != IWM_PAGING_BLOCK_SIZE
> +#error IWM_BLOCK_2_EXP_SIZE must be power of 2 of IWM_PAGING_BLOCK_SIZE
> +#endif
> +
> + num_of_pages = image->paging_mem_size / IWM_FW_PAGING_SIZE;
> + sc->num_of_paging_blk =
> +    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;
> +
> + sc->num_of_pages_in_last_blk =
> + num_of_pages -
> + IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
> +
> + DPRINTF(("%s: Paging: allocating mem for %d paging blocks, each block"
> +    " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
> +    sc->num_of_paging_blk,
> +    sc->num_of_pages_in_last_blk));
> +
> + /* allocate block of 4Kbytes for paging CSS */
> + error = iwm_dma_contig_alloc(sc->sc_dmat,
> +    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
> +    4096);
> + if (error) {
> + /* free all the previous pages since we failed */
> + iwm_free_fw_paging(sc);
> + return ENOMEM;
> + }
> +
> + sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
> +
> + DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
> +    DEVNAME(sc)));
> +
> + /*
> + * allocate blocks in dram.
> + * since that CSS allocated in fw_paging_db[0] loop start from index 1
> + */
> + for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
> + /* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
> + /* XXX Use iwm_dma_contig_alloc for allocating */
> + error = iwm_dma_contig_alloc(sc->sc_dmat,
> +     &sc->fw_paging_db[blk_idx].fw_paging_block,
> +    IWM_PAGING_BLOCK_SIZE, 4096);
> + if (error) {
> + /* free all the previous pages since we failed */
> + iwm_free_fw_paging(sc);
> + return ENOMEM;
> + }
> +
> + sc->fw_paging_db[blk_idx].fw_paging_size =
> +    IWM_PAGING_BLOCK_SIZE;
> +
> + DPRINTF((
> +    "%s: Paging: allocated 32K bytes for firmware paging.\n",
> +    DEVNAME(sc)));
> + }
> +
> + return 0;
> +}
> +
> +int
> +iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
> +{
> + int ret;
> +
> + ret = iwm_alloc_fw_paging_mem(sc, fw);
> + if (ret)
> + return ret;
> +
> + return iwm_fill_paging_mem(sc, fw);
> +}
> +
> +/* send paging cmd to FW in case CPU2 has paging image */
> +int
> +iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
> +{
> + int blk_idx;
> + uint32_t dev_phy_addr;
> + struct iwm_fw_paging_cmd fw_paging_cmd = {
> + .flags =
> + htole32(IWM_PAGING_CMD_IS_SECURED |
> + IWM_PAGING_CMD_IS_ENABLED |
> + (sc->num_of_pages_in_last_blk <<
> + IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
> + .block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
> + .block_num = htole32(sc->num_of_paging_blk),
> + };
> +
> + /* loop for for all paging blocks + CSS block */
> + for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
> + dev_phy_addr = htole32(
> +    sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
> +    IWM_PAGE_2_EXP_SIZE);
> + fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
> + bus_dmamap_sync(sc->sc_dmat,
> +    sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
> +    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
> +    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
> + }
> +
> + return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
> +       IWM_LONG_GROUP, 0),
> +    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
> +}
> +
> +int
>  iwm_init_hw(struct iwm_softc *sc)
>  {
>   struct ieee80211com *ic = &sc->sc_ic;
> @@ -7188,6 +7472,8 @@ iwm_notif_intr(struct iwm_softc *sc)
>   case IWM_REMOVE_STA:
>   case IWM_TXPATH_FLUSH:
>   case IWM_LQ_CMD:
> + case IWM_WIDE_ID(IWM_LONG_GROUP,
> + IWM_FW_PAGING_BLOCK_CMD):
>   case IWM_BT_CONFIG:
>   case IWM_REPLY_THERMAL_MNG_BACKOFF:
>   case IWM_NVM_ACCESS_CMD:
> blob - 8919f30dc6b86e23fb9433fbe2998394c17eb7b6
> blob + 1e6308bbd5b4958427d4a354fb4f339688bbc8d1
> --- sys/dev/pci/if_iwmreg.h
> +++ sys/dev/pci/if_iwmreg.h
> @@ -1737,6 +1737,9 @@ struct iwm_agn_scd_bc_tbl {
>  #define IWM_CALIBRATION_COMPLETE_NOTIFICATION 0x67
>  #define IWM_RADIO_VERSION_NOTIFICATION 0x68
>  
> +/* paging block to FW cpu2 */
> +#define IWM_FW_PAGING_BLOCK_CMD 0x4f
> +
>  /* Scan offload */
>  #define IWM_SCAN_OFFLOAD_REQUEST_CMD 0x51
>  #define IWM_SCAN_OFFLOAD_ABORT_CMD 0x52
> @@ -2099,6 +2102,57 @@ struct iwm_nvm_access_cmd {
>   uint16_t length;
>   uint8_t data[];
>  } __packed; /* IWM_NVM_ACCESS_CMD_API_S_VER_2 */
> +
> +/*
> + * Block paging calculations
> + */
> +#define IWM_PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
> +#define IWM_FW_PAGING_SIZE (1 << IWM_PAGE_2_EXP_SIZE) /* page size is 4KB */
> +#define IWM_PAGE_PER_GROUP_2_EXP_SIZE 3
> +/* 8 pages per group */
> +#define IWM_NUM_OF_PAGE_PER_GROUP (1 << IWM_PAGE_PER_GROUP_2_EXP_SIZE)
> +/* don't change, support only 32KB size */
> +#define IWM_PAGING_BLOCK_SIZE (IWM_NUM_OF_PAGE_PER_GROUP * IWM_FW_PAGING_SIZE)
> +/* 32K == 2^15 */
> +#define IWM_BLOCK_2_EXP_SIZE (IWM_PAGE_2_EXP_SIZE + IWM_PAGE_PER_GROUP_2_EXP_SIZE)
> +
> +/*
> + * Image paging calculations
> + */
> +#define IWM_BLOCK_PER_IMAGE_2_EXP_SIZE 5
> +/* 2^5 == 32 blocks per image */
> +#define IWM_NUM_OF_BLOCK_PER_IMAGE (1 << IWM_BLOCK_PER_IMAGE_2_EXP_SIZE)
> +/* maximum image size 1024KB */
> +#define IWM_MAX_PAGING_IMAGE_SIZE (IWM_NUM_OF_BLOCK_PER_IMAGE * IWM_PAGING_BLOCK_SIZE)
> +
> +/* Virtual address signature */
> +#define IWM_PAGING_ADDR_SIG 0xAA000000
> +
> +#define IWM_PAGING_CMD_IS_SECURED (1 << 9)
> +#define IWM_PAGING_CMD_IS_ENABLED (1 << 8)
> +#define IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0
> +#define IWM_PAGING_TLV_SECURE_MASK 1
> +
> +#define IWM_NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
> +
> +/*
> + * struct iwm_fw_paging_cmd - paging layout
> + *
> + * (IWM_FW_PAGING_BLOCK_CMD = 0x4f)
> + *
> + * Send to FW the paging layout in the driver.
> + *
> + * @flags: various flags for the command
> + * @block_size: the block size in powers of 2
> + * @block_num: number of blocks specified in the command.
> + * @device_phy_addr: virtual addresses from device side
> +*/
> +struct iwm_fw_paging_cmd {
> + uint32_t flags;
> + uint32_t block_size;
> + uint32_t block_num;
> + uint32_t device_phy_addr[IWM_NUM_OF_FW_PAGING_BLOCKS];
> +} __packed; /* IWM_FW_PAGING_BLOCK_CMD_API_S_VER_1 */
>  
>  /**
>   * struct iwm_nvm_access_resp_ver2 - response to IWM_NVM_ACCESS_CMD
> blob - 0eaf07bed8dfc0ec674e55b3da211ef88ad93dfe
> blob + edf48df74c872bdeab3a08c65bf2434e4f297509
> --- sys/dev/pci/if_iwmvar.h
> +++ sys/dev/pci/if_iwmvar.h
> @@ -173,6 +173,7 @@ struct iwm_fw_info {
>   } fw_sect[IWM_UCODE_SECT_MAX];
>   size_t fw_totlen;
>   int fw_count;
> + uint32_t paging_mem_size;
>   } fw_sects[IWM_UCODE_TYPE_MAX];
>  };
>  
> @@ -237,6 +238,16 @@ struct iwm_dma_info {
>   bus_size_t size;
>  };
>  
> +/**
> + * struct iwm_fw_paging
> + * @fw_paging_block: dma memory info
> + * @fw_paging_size: page size
> + */
> +struct iwm_fw_paging {
> + struct iwm_dma_info fw_paging_block;
> + uint32_t fw_paging_size;
> +};
> +
>  #define IWM_TX_RING_COUNT 256
>  #define IWM_TX_RING_LOMARK 192
>  #define IWM_TX_RING_HIMARK 224
> @@ -486,6 +497,14 @@ struct iwm_softc {
>  
>   int host_interrupt_operation_mode;
>   int sc_ltr_enabled;
> +
> + /*
> + * Paging parameters - All of the parameters should be set by the
> + * opmode when paging is enabled
> + */
> + struct iwm_fw_paging fw_paging_db[IWM_NUM_OF_FW_PAGING_BLOCKS];
> + uint16_t num_of_paging_blk;
> + uint16_t num_of_pages_in_last_blk;
>  
>  #if NBPFILTER > 0
>   caddr_t sc_drvbpf;
>
>

Reply | Threaded
Open this post in threaded view
|

Re: iwm: add support for firmware paging

Stefan Sperling-5
On Mon, Oct 28, 2019 at 08:48:41PM +0100, Mark Kettenis wrote:

> > Date: Mon, 28 Oct 2019 19:28:29 +0100
> > From: Stefan Sperling <[hidden email]>
> >
> > Newer iwm(4) firmware versions will require paging to host DRAM.
> > This diff implements support for this. It was written by Imre Vadasz in 2017.
> >
> > I would like to get review by people with experience in the kernel memory
> > subsystem and DMA, to check that this diff isn't doing something stupid.
>
> I have trouble understanding what the bus_dmamap_sync() calls in
> iwm_alloc_fw_paging_mem() are supposed to achieve.

The driver has to initialize these pages with data provided in the firmware
image. This is done with memcpy(). I guess these bus_dmamap_sync() calls are
supposed to ensure that the correct data appears on the device side when
the pages are read from there. Note that the syncs occur both before memcpy()
in iwm_fill_paging_mem() and after memcpy() in iwm_send_paging_cmd().
x86-specific behaviour aside, is there any reason why those syncs should
not be required?

I am a bit worried that iwm_free_fw_paging() is only called in error paths.
Allocated pages should probably be torn down either when the device goes
down, or when it is detached. We don't currently implement detach in this
driver, so the pages are never freed if no errors occur (the same applies
to Rx/Tx rings, so perhaps keeping the allocation persistent is OK).
Do you think we should alloc/free these pages whenever we do up/down?

Reply | Threaded
Open this post in threaded view
|

Re: iwm: add support for firmware paging

Mark Kettenis
> Date: Tue, 29 Oct 2019 11:31:31 +0100
> From: Stefan Sperling <[hidden email]>
>
> On Mon, Oct 28, 2019 at 08:48:41PM +0100, Mark Kettenis wrote:
> > > Date: Mon, 28 Oct 2019 19:28:29 +0100
> > > From: Stefan Sperling <[hidden email]>
> > >
> > > Newer iwm(4) firmware versions will require paging to host DRAM.
> > > This diff implements support for this. It was written by Imre Vadasz in 2017.
> > >
> > > I would like to get review by people with experience in the kernel memory
> > > subsystem and DMA, to check that this diff isn't doing something stupid.
> >
> > I have trouble understanding what the bus_dmamap_sync() calls in
> > iwm_alloc_fw_paging_mem() are supposed to achieve.
>
> The driver has to initialize these pages with data provided in the firmware
> image. This is done with memcpy(). I guess these bus_dma_sync calls are
> supposed to ensure that the correct data appears on the device side when
> the pages are read from there. Note that the syncs occur both before memcpy()
> in iwm_fill_paging_mem() and after memcpy() in iwm_send_paging_cmd().
> x86-specific behaviour aside, is there any reason why those syncs should
> not be required?

Unless the device actually writes to those pages, the syncs before the
memcpy() should not be necessary.  You do need to sync after the
memcpy() although on x86 that is mostly for the compiler's benefit, to
make sure it doesn't reorder things in a way that pokes the hardware
before the memcpy() is done.

> I am a bit worried that iwm_fw_paging() is only called in error paths.
> Allocated pages should probably be torn down either when the device goes
> down, or when it is detached. We don't currently implement detach in this
> driver, so the pages are never freed if no errors occur (the same applies
> to Rx/Tx rings, so perhaps keeping the allocation persistent is OK).
> Do you think we should alloc/free these pages whenever we do up/down?

That might not be the best approach as you run the risk that
allocations will fail if you up the interface again later on.  This is
more likely to be an issue if you need phys contig memory, as such
allocations may fail even when there is plenty of free memory due to
fragmentation.

Reply | Threaded
Open this post in threaded view
|

Re: iwm: add support for firmware paging

Stefan Sperling-5
On Tue, Oct 29, 2019 at 11:57:21AM +0100, Mark Kettenis wrote:

> > Date: Tue, 29 Oct 2019 11:31:31 +0100
> > From: Stefan Sperling <[hidden email]>
> >
> > On Mon, Oct 28, 2019 at 08:48:41PM +0100, Mark Kettenis wrote:
> > > > Date: Mon, 28 Oct 2019 19:28:29 +0100
> > > > From: Stefan Sperling <[hidden email]>
> > > >
> > > > Newer iwm(4) firmware versions will require paging to host DRAM.
> > > > This diff implements support for this. It was written by Imre Vadasz in 2017.
> > > >
> > > > I would like to get review by people with experience in the kernel memory
> > > > subsystem and DMA, to check that this diff isn't doing something stupid.
> > >
> > > I have trouble understanding what the bus_dmamap_sync() calls in
> > > iwm_alloc_fw_paging_mem() are supposed to achieve.
> >
> > The driver has to initialize these pages with data provided in the firmware
> > image. This is done with memcpy(). I guess these bus_dma_sync calls are
> > supposed to ensure that the correct data appears on the device side when
> > the pages are read from there. Note that the syncs occur both before memcpy()
> > in iwm_fill_paging_mem() and after memcpy() in iwm_send_paging_cmd().
> > x86-specific behaviour aside, is there any reason why those syncs should
> > not be required?
>
> Unless the device actually writes to those pages, the syncs before the
> memcpy() should not be necessary.  You do need to sync after the
> memcpy() although on x86 that is mostly for the compiler's benefit, to
> make sure it doesn't reorder things in a way that pokes the hardware
> before the memcpy() is done.

But syncs won't do any harm even in case the device does not perform
any writes, correct?

I haven't yet checked whether the firmware actually modifies those pages.
And I don't know if such behaviour would always occur or be triggered in
specific circumstances.
 

> > I am a bit worried that iwm_fw_paging() is only called in error paths.
> > Allocated pages should probably be torn down either when the device goes
> > down, or when it is detached. We don't currently implement detach in this
> > driver, so the pages are never freed if no errors occur (the same applies
> > to Rx/Tx rings, so perhaps keeping the allocation persistent is OK).
> > Do you think we should alloc/free these pages whenever we do up/down?
>
> That might not be the best approach as you run the risk that
> allocations will fail if you up the interface again later on.  This is
> more likely to be an issue if you need phys contig memory, as such
> allocations may fail even when there is plenty of free memory due to
> fragmentations.

Right. So a persistent allocation, as implemented by the patch, makes sense.

Reply | Threaded
Open this post in threaded view
|

Re: iwm: add support for firmware paging

Krystian Lewandowski-2
In reply to this post by Stefan Sperling-5
Hi Stefan,
I tested it for dozen hours of YT streaming and usual web browsing.
No issues observed.

iwm0: hw rev 0x230, fw ver 22.361476.0, address e4:0e:ee:81:3d:a0

--
Krystian