admin管理员组

文章数量:1403226

I am facing a specific issue during image stitching. The stitching method provided by OpenCV doesn’t work properly in my case, likely due to insufficient overlap between the images.

To address this issue, I created a custom script to correctly position each tile, considering rotation and both horizontal (overlapX) and vertical (overlapY) overlaps. However, when merging these tiles, I encounter noticeable brightness differences, resulting in a visually inconsistent final image. I can provide the image tiles and the stitching script if needed.

Could anyone suggest how to adjust or harmonize the brightness of the images during this stitching process? Are there particular methods or techniques recommended within OpenCV?

I try this code but it seems not work : I get the same image

import os
import sys
import cv2
import numpy as np
import math


def main():
    import cv2.detail
    
    if len(sys.argv)<4:
        print("Usage: python exposure_compensator_with_mask.py <inputDir> <overlapX> <overlapY>")
        sys.exit(1)

    inputDir = sys.argv[1]
    overlapX = int(sys.argv[2])
    overlapY = int(sys.argv[3])

    if not os.path.isdir(inputDir):
        print("Dossier introuvable:", inputDir)
        sys.exit(1)

    all_files = [f for f in os.listdir(inputDir) if f.lower().endswith(('.jpg','.jpeg','.png'))]
    if not all_files:
        print("Aucune image .jpg/.png trouvée")
        sys.exit(1)

    images = []
    masks  = []
    corners= []

    current_x=0
    for f in all_files:
        path = os.path.join(inputDir, f)
        img = cv2.imread(path)
        if img is None:
            print("Impossible de lire:", path)
            continue

        h,w = img.shape[:2]

        mask = np.zeros((h,w), dtype=np.uint8)
        zoneW = max(0, w-overlapX)
        zoneH = max(0, h-overlapY)
        mask[0:zoneH, 0:zoneW] = 255

        images.append(img)
        masks.append(mask)

        corners.append( (current_x, 0) )
        current_x += (w - 1) 

    if not images:
        sys.exit(0)

    import cv2.detail
    detail = cv2.detail
    compensator = detail.ExposureCompensator_createDefault(detail.ExposureCompensator_GAIN)
    compensator.feed(corners, images, masks)

    for i in range(len(images)):
        compensator.apply(i, corners[i], images[i], masks[i])

    outDir = os.path.join(inputDir, "corrected")
    os.makedirs(outDir, exist_ok=True)
    for i,f in enumerate(all_files):
        outPath = os.path.join(outDir, f)
        cv2.imwrite(outPath, images[i])
        print("Image corrigée =>", outPath)

    print("Terminé.")

if __name__ == "__main__":
    main()

EDIT : this is the script to stitch images. to use it :

npm i sharp
node mosaic_serpentin_rotate.js ./vignettes out-s.png 680 440 0 60 1.7

File mosaic_serpentin_rotate.js

const fs = require('fs');
const path = require('path');
const sharp = require('sharp');

// Regex pour reconnaître 123456789-a-[x]-[y].jpg
const regex = /^123456789-a-(\d+)-(\d+)\.(jpg|jpeg|png)$/i;

// Récupération des arguments
const [,, inputDir, outputFile, overlapXArg, overlapYArg, offsetXArg, offsetYArg, rotateArg] = process.argv;

const overlapX = overlapXArg ? parseInt(overlapXArg, 10) : 50;
const overlapY = overlapYArg ? parseInt(overlapYArg, 10) : 50;
const offsetX  = offsetXArg  ? parseInt(offsetXArg, 10)  : 0;
const offsetY  = offsetYArg  ? parseInt(offsetYArg, 10)  : 0;
const rotateAngle = rotateArg ? parseFloat(rotateArg)    : 0; // en degrés

async function main() {
  if (!inputDir || !outputFile) {
    console.error('Usage: node mosaic_serpentin_rotate.js <dir> <out.png> [overlapX=50] [overlapY=50] [offsetX=0] [offsetY=0] [rotateAngle=0]');
    process.exit(1);
  }

  // 1) Grouper par colonne x => columns[x] = [{y, file}, ...]
  const columns = {};
  const files = fs.readdirSync(inputDir);
  for (const f of files) {
    const m = f.match(regex);
    if (m) {
      const x = parseInt(m[1], 10);
      const y = parseInt(m[2], 10);
      if (!columns[x]) columns[x] = [];
      columns[x].push({ y, file: f });
    }
  }

  // Tri des colonnes
  const allXs = Object.keys(columns).map(Number).sort((a,b)=>a-b);
  if (!allXs.length) {
    console.error('Aucune image détectée selon la regex 123456789-a-[x]-[y].jpg');
    process.exit(1);
  }

  // 2) Lire dimension tile (exemple => 1er fichier de la 1re col)
  const firstFile = columns[allXs[0]][0].file;
  let meta;
  try {
    meta = await sharp(path.join(inputDir, firstFile)).metadata();
  } catch (err) {
    console.error('Erreur lecture image exemple:', err);
    process.exit(1);
  }
  const tileW = meta.width;
  const tileH = meta.height;

  // 3) Calculer dimension finale approximative
  const numCols = allXs.length;
  const maxTilesInACol = Math.max(...Object.values(columns).map(arr => arr.length));
  const finalWidth = tileW + (numCols - 1)*(tileW - overlapX + offsetX);
  //const finalHeight = tileH + (maxTilesInACol - 1)*(tileH - overlapY + offsetY);
  const finalHeight = tileH + (maxTilesInACol - 1)*(tileH - overlapY);

  console.log(`Tiles = ${tileW}x${tileH}, final ~ ${finalWidth} x ${finalHeight}`);
  console.log(`overlapX=${overlapX}, overlapY=${overlapY}, offsetX=${offsetX}, offsetY=${offsetY}, rotateAngle=${rotateAngle}`);

  // 4) Construire la liste de composite => rotation + position
  const tasks = [];

  // On parcourt chaque colonne x dans l'ordre
  for (let i = 0; i < numCols; i++) {
    const xVal = allXs[i];
    let colArr = columns[xVal];

    // Si xVal est pair => y asc, sinon y desc
    // => serpentin vertical
    if (xVal % 2 === 0) {
      colArr.sort((a,b) => a.y - b.y);
    } else {
      colArr.sort((a,b) => b.y - a.y);
    }

    // posX
    const baseX = i * (tileW - overlapX);

    // Parcourir chaque tuile de la colonne, dans l'ordre imposé ci-dessus
    for (let idx = 0; idx < colArr.length; idx++) {
      const { y, file } = colArr[idx];
      const fpath = path.join(inputDir, file);

      // index => 0.. => position verticale
      //const posY = idx * (tileH - overlapY) - offsetY * idx;
      //const posY = (finalHeight - tileH) - idx * (tileH + overlapY + offsetY);
      let posY = (finalHeight - tileH) - y * (tileH - overlapY);
      if (xVal % 2 !== 0) 
        posY = idx * (tileH - overlapY);

      if (xVal % 2 === 0)
        posY+=offsetY/2;
      else
        posY-=offsetY/2;

      if (xVal % 2 === 0 && y==0)
        posY-=offsetY;
      if (xVal % 2 !== 0 && y==colArr.length-1)
        posY+=offsetY;

      // Charger + pivoter => renvoyer un buffer => ensemble posite
      const p = sharp(fpath)
        .rotate(rotateAngle, { background: { r:0, g:0, b:0, alpha:0 } })
        .png()  // RGBA
        .toBuffer()
        .then(buf => ({
          input: buf,
          left: baseX - offsetX * y,
          top: posY,
          blend: 'over'
        }));

      tasks.push(p);
    }
  }

  let compositeLayers;
  try {
    compositeLayers = await Promise.all(tasks);
  } catch (err) {
    console.error('Erreur lors de la rotation:', err);
    process.exit(1);
  }

  // 5) Créer le canevas => RGBA
  // => Sortie en PNG => conservez la transparence si tuiles pivotées + trous
  try {
    await sharp({
      create: {
        width: finalWidth,
        height: finalHeight,
        channels: 4,
        background: { r:0, g:0, b:0, alpha:0 }
      }
    })
    posite(compositeLayers)
    .png()
    .toFile(outputFile);

    console.log(`Assemblage terminé => ${outputFile}`);
  } catch (err) {
    console.error('Erreur finale composite:', err);
  }
}

main().catch(err => {
  console.error('Erreur inattendue:', err);
});

本文标签: pythonBrightness differences in stitchingStack Overflow